diff --git a/.golangci.yml b/.golangci.yml index bc3f402736b..dd3923645de 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -26,7 +26,6 @@ linters: - errchkjson #TODO: enable me - unused #TODO: enable me - testifylint #TODO: enable me - - perfsprint #TODO: enable me - gocheckcompilerdirectives - protogetter enable: @@ -36,6 +35,7 @@ linters: - wastedassign - gofmt - gocritic + - perfsprint # - revive # - forcetypeassert # - stylecheck diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go index ac1fc94a540..6bdbe84a61a 100644 --- a/accounts/abi/argument.go +++ b/accounts/abi/argument.go @@ -21,6 +21,7 @@ package abi import ( "encoding/json" + "errors" "fmt" "reflect" "strings" @@ -82,7 +83,7 @@ func (arguments Arguments) isTuple() bool { func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) { if len(data) == 0 { if len(arguments) != 0 { - return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") + return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected") } // Nothing to unmarshal, return default variables nonIndexedArgs := arguments.NonIndexed() @@ -99,11 +100,11 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) { func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error { // Make sure map is not nil if v == nil { - return fmt.Errorf("abi: cannot unpack into a nil map") + return errors.New("abi: cannot unpack into a nil map") } if len(data) == 0 { if len(arguments) != 0 { - return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") + return errors.New("abi: attempting to unmarshall an empty string while arguments are expected") } return nil // Nothing to unmarshal, return } diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index cee0a0c59b6..6b1b39b28e4 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -218,7 +218,7 @@ func (c 
*BoundContract) transact(opts *TransactOpts, contract *libcommon.Address if opts.Value != nil { overflow := value.SetFromBig(opts.Value) if overflow { - return nil, fmt.Errorf("opts.Value higher than 2^256-1") + return nil, errors.New("opts.Value higher than 2^256-1") } } var nonce uint64 @@ -240,7 +240,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *libcommon.Address } gasPrice, overflow := uint256.FromBig(gasPriceBig) if overflow { - return nil, fmt.Errorf("gasPriceBig higher than 2^256-1") + return nil, errors.New("gasPriceBig higher than 2^256-1") } gasLimit := opts.GasLimit if gasLimit == 0 { diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index 84d1b34195c..4e381f3847f 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -233,7 +233,7 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri structFieldName := ToCamelCase(argName) if structFieldName == "" { - return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct") + return nil, errors.New("abi: purely underscored output cannot unpack to struct") } // this abi has already been paired, skip it... 
unless there exists another, yet unassigned diff --git a/accounts/abi/type.go b/accounts/abi/type.go index 25c9f45c461..3ad141b1120 100644 --- a/accounts/abi/type.go +++ b/accounts/abi/type.go @@ -71,7 +71,7 @@ var ( func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) { // check that array brackets are equal if they exist if strings.Count(t, "[") != strings.Count(t, "]") { - return Type{}, fmt.Errorf("invalid arg type in abi") + return Type{}, errors.New("invalid arg type in abi") } typ.stringKind = t @@ -110,7 +110,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty } typ.stringKind = embeddedType.stringKind + sliced } else { - return Type{}, fmt.Errorf("invalid formatting of array type") + return Type{}, errors.New("invalid formatting of array type") } return typ, err } diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index 3753c57bbdd..0d509bd48be 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -21,6 +21,7 @@ package abi import ( "encoding/binary" + "errors" "fmt" "math/big" "reflect" @@ -98,7 +99,7 @@ func readBool(word []byte) (bool, error) { // readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes) func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { if t.T != FunctionTy { - return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array") + return [24]byte{}, errors.New("abi: invalid type in call to make function type byte array") } if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 { err = fmt.Errorf("abi: got improperly encoded function type, got %v", word) @@ -111,7 +112,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { // ReadFixedBytes uses reflection to create a fixed array to be read from. 
func ReadFixedBytes(t Type, word []byte) (interface{}, error) { if t.T != FixedBytesTy { - return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array") + return nil, errors.New("abi: invalid type in call to make fixed byte array") } // convert array := reflect.New(t.GetType()).Elem() @@ -140,7 +141,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) // declare our array refSlice = reflect.New(t.GetType()).Elem() } else { - return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage") + return nil, errors.New("abi: invalid type in array/slice unpacking stage") } // Arrays have packed elements, resulting in longer unpack steps. diff --git a/cl/aggregation/pool_impl.go b/cl/aggregation/pool_impl.go index 5cdef889808..38afe528255 100644 --- a/cl/aggregation/pool_impl.go +++ b/cl/aggregation/pool_impl.go @@ -18,7 +18,7 @@ package aggregation import ( "context" - "fmt" + "errors" "sync" "time" @@ -30,7 +30,7 @@ import ( "github.com/erigontech/erigon/cl/utils/eth_clock" ) -var ErrIsSuperset = fmt.Errorf("attestation is superset of existing attestation") +var ErrIsSuperset = errors.New("attestation is superset of existing attestation") var ( blsAggregate = bls.AggregateSignatures @@ -89,7 +89,7 @@ func (p *aggregationPoolImpl) AddAttestation(inAtt *solid.Attestation) error { return err } if len(merged) != 96 { - return fmt.Errorf("merged signature is too long") + return errors.New("merged signature is too long") } var mergedSig [96]byte copy(mergedSig[:], merged) diff --git a/cl/antiquary/tests/tests.go b/cl/antiquary/tests/tests.go index feacc4c31c1..74681e445b0 100644 --- a/cl/antiquary/tests/tests.go +++ b/cl/antiquary/tests/tests.go @@ -181,7 +181,7 @@ func GetBellatrixRandom() ([]*cltypes.SignedBeaconBlock, *state.CachingBeaconSta for i := 0; i < 96; i++ { block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig) // Lets do te - b, err := bellatrixFS.ReadFile("test_data/bellatrix/blocks_" + 
strconv.FormatInt(int64(i), 10) + ".ssz_snappy") + b, err := bellatrixFS.ReadFile("test_data/bellatrix/blocks_" + strconv.Itoa(i) + ".ssz_snappy") if err != nil { panic(err) } diff --git a/cl/beacon/beaconhttp/api.go b/cl/beacon/beaconhttp/api.go index 262f66feb7d..8b8f0bf2292 100644 --- a/cl/beacon/beaconhttp/api.go +++ b/cl/beacon/beaconhttp/api.go @@ -147,7 +147,7 @@ func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc { case strings.Contains(contentType, "text/event-stream"): return default: - http.Error(w, fmt.Sprintf("content type must include application/json, application/octet-stream, or text/event-stream, got %s", contentType), http.StatusBadRequest) + http.Error(w, "content type must include application/json, application/octet-stream, or text/event-stream, got "+contentType, http.StatusBadRequest) } }) } diff --git a/cl/beacon/beaconhttp/args.go b/cl/beacon/beaconhttp/args.go index d4ff2c04171..72572601756 100644 --- a/cl/beacon/beaconhttp/args.go +++ b/cl/beacon/beaconhttp/args.go @@ -17,7 +17,7 @@ package beaconhttp import ( - "fmt" + "errors" "net/http" "regexp" "strconv" @@ -73,7 +73,7 @@ func EpochFromRequest(r *http.Request) (uint64, error) { regex := regexp.MustCompile(`^\d+$`) epoch := chi.URLParam(r, "epoch") if !regex.MatchString(epoch) { - return 0, fmt.Errorf("invalid path variable: {epoch}") + return 0, errors.New("invalid path variable: {epoch}") } epochMaybe, err := strconv.ParseUint(epoch, 10, 64) if err != nil { @@ -95,7 +95,7 @@ func BlockIdFromRequest(r *http.Request) (*SegmentID, error) { blockId := chi.URLParam(r, "block_id") if !regex.MatchString(blockId) { - return nil, fmt.Errorf("invalid path variable: {block_id}") + return nil, errors.New("invalid path variable: {block_id}") } if blockId == "head" { @@ -122,7 +122,7 @@ func StateIdFromRequest(r *http.Request) (*SegmentID, error) { stateId := chi.URLParam(r, "state_id") if !regex.MatchString(stateId) { - return nil, fmt.Errorf("invalid path variable: {state_id}") + 
return nil, errors.New("invalid path variable: {state_id}") } if stateId == "head" { @@ -154,17 +154,17 @@ func HashFromQueryParams(r *http.Request, name string) (*common.Hash, error) { } // check if hashstr is an hex string if len(hashStr) != 2+2*32 { - return nil, fmt.Errorf("invalid hash length") + return nil, errors.New("invalid hash length") } if hashStr[:2] != "0x" { - return nil, fmt.Errorf("invalid hash prefix") + return nil, errors.New("invalid hash prefix") } notHex, err := regexp.MatchString("[^0-9A-Fa-f]", hashStr[2:]) if err != nil { return nil, err } if notHex { - return nil, fmt.Errorf("invalid hash characters") + return nil, errors.New("invalid hash characters") } hash := common.HexToHash(hashStr) diff --git a/cl/beacon/builder/client.go b/cl/beacon/builder/client.go index 8a64d426995..510d502bdb5 100644 --- a/cl/beacon/builder/client.go +++ b/cl/beacon/builder/client.go @@ -36,7 +36,7 @@ import ( var _ BuilderClient = &builderClient{} var ( - ErrNoContent = fmt.Errorf("no http content") + ErrNoContent = errors.New("no http content") ) type builderClient struct { diff --git a/cl/beacon/handler/attestation_rewards.go b/cl/beacon/handler/attestation_rewards.go index a218adf0d74..a555eb28593 100644 --- a/cl/beacon/handler/attestation_rewards.go +++ b/cl/beacon/handler/attestation_rewards.go @@ -18,7 +18,7 @@ package handler import ( "encoding/json" - "fmt" + "errors" "io" "net/http" @@ -94,7 +94,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r } headEpoch := headSlot / a.beaconChainCfg.SlotsPerEpoch if epoch > headEpoch { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch is in the future")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch is in the future")) } // Few cases to handle: // 1) finalized data @@ -115,14 +115,14 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r continue } if version == clparams.Phase0Version { 
- return nil, beaconhttp.NewEndpointError(http.StatusHTTPVersionNotSupported, fmt.Errorf("phase0 state is not supported when there is no antiquation")) + return nil, beaconhttp.NewEndpointError(http.StatusHTTPVersionNotSupported, errors.New("phase0 state is not supported when there is no antiquation")) } inactivityScores, err := a.forkchoiceStore.GetInactivitiesScores(blockRoot) if err != nil { return nil, err } if inactivityScores == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no inactivity scores found for this epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no inactivity scores found for this epoch")) } prevPartecipation, err := a.forkchoiceStore.GetPreviousPartecipationIndicies(blockRoot) @@ -130,24 +130,24 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r return nil, err } if prevPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no previous partecipation found for this epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no previous partecipation found for this epoch")) } validatorSet, err := a.forkchoiceStore.GetValidatorSet(blockRoot) if err != nil { return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no validator set found for this epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no validator set found for this epoch")) } ok, finalizedCheckpoint, _, _ := a.forkchoiceStore.GetFinalityCheckpoints(blockRoot) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no finalized checkpoint found for this epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no finalized checkpoint found for this epoch")) } return a.computeAttestationsRewardsForAltair(validatorSet, inactivityScores, prevPartecipation, 
a.isInactivityLeaking(epoch, finalizedCheckpoint), filterIndicies, epoch) } - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no block found for this epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no block found for this epoch")) } root, err := a.findEpochRoot(tx, epoch) @@ -159,7 +159,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r return nil, err } if lastSlotPtr == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no block found for this epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no block found for this epoch")) } lastSlot := *lastSlotPtr @@ -168,7 +168,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r return nil, err } if lastSlot > stateProgress { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("requested range is not yet processed or the node is not archivial")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("requested range is not yet processed or the node is not archivial")) } epochData, err := state_accessors.ReadEpochData(tx, a.beaconChainCfg.RoundSlotToEpoch(lastSlot)) @@ -181,7 +181,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no validator set found for this epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no validator set found for this epoch")) } _, previousIdx, err := a.stateReader.ReadPartecipations(tx, lastSlot) diff --git a/cl/beacon/handler/blobs.go b/cl/beacon/handler/blobs.go index fef1bf82660..718cb4643ac 100644 --- a/cl/beacon/handler/blobs.go +++ b/cl/beacon/handler/blobs.go @@ -17,7 +17,7 @@ package handler import ( - "fmt" + "errors" "net/http" "strconv" @@ -50,7 +50,7 @@ func (a *ApiHandler) 
GetEthV1BeaconBlobSidecars(w http.ResponseWriter, r *http.R return nil, err } if slot == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("block not found")) } if a.caplinSnapshots != nil && *slot <= a.caplinSnapshots.FrozenBlobs() { out, err := a.caplinSnapshots.ReadBlobSidecars(*slot) diff --git a/cl/beacon/handler/block_production.go b/cl/beacon/handler/block_production.go index c6b607f8927..fa78eedd70e 100644 --- a/cl/beacon/handler/block_production.go +++ b/cl/beacon/handler/block_production.go @@ -68,7 +68,7 @@ const ( ) var ( - errBuilderNotEnabled = fmt.Errorf("builder is not enabled") + errBuilderNotEnabled = errors.New("builder is not enabled") ) var defaultGraffitiString = "Caplin" @@ -88,14 +88,14 @@ func (a *ApiHandler) GetEthV1ValidatorAttestationData( if slot == nil || committeeIndex == nil { return nil, beaconhttp.NewEndpointError( http.StatusBadRequest, - fmt.Errorf("slot and committee_index url params are required"), + errors.New("slot and committee_index url params are required"), ) } headState := a.syncedData.HeadState() if headState == nil { return nil, beaconhttp.NewEndpointError( http.StatusServiceUnavailable, - fmt.Errorf("beacon node is still syncing"), + errors.New("beacon node is still syncing"), ) } @@ -164,7 +164,7 @@ func (a *ApiHandler) GetEthV3ValidatorBlock( if s == nil { return nil, beaconhttp.NewEndpointError( http.StatusServiceUnavailable, - fmt.Errorf("node is syncing"), + errors.New("node is syncing"), ) } @@ -400,7 +400,7 @@ func (a *ApiHandler) getBuilderPayload( if err != nil { return nil, err } else if header == nil { - return nil, fmt.Errorf("no error but nil header") + return nil, errors.New("no error but nil header") } // check the version @@ -419,10 +419,10 @@ func (a *ApiHandler) getBuilderPayload( for i := 0; i < header.Data.Message.BlobKzgCommitments.Len(); i++ { c := 
header.Data.Message.BlobKzgCommitments.Get(i) if c == nil { - return nil, fmt.Errorf("nil blob kzg commitment") + return nil, errors.New("nil blob kzg commitment") } if len(c) != length.Bytes48 { - return nil, fmt.Errorf("invalid blob kzg commitment length") + return nil, errors.New("invalid blob kzg commitment length") } } } @@ -626,7 +626,7 @@ func (a *ApiHandler) produceBeaconBody( wg.Wait() if executionPayload == nil { - return nil, 0, fmt.Errorf("failed to produce execution payload") + return nil, 0, errors.New("failed to produce execution payload") } beaconBody.ExecutionPayload = executionPayload return beaconBody, executionValue, nil @@ -859,7 +859,7 @@ func (a *ApiHandler) publishBlindedBlocks(w http.ResponseWriter, r *http.Request // check commitments blockCommitments := signedBlindedBlock.Block.Body.BlobKzgCommitments if len(blobsBundle.Commitments) != blockCommitments.Len() { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("commitments length mismatch")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("commitments length mismatch")) } for i := range blobsBundle.Commitments { // add the bundle to recently produced blobs @@ -885,7 +885,7 @@ func (a *ApiHandler) parseEthConsensusVersion( apiVersion int, ) (clparams.StateVersion, error) { if str == "" && apiVersion == 2 { - return 0, fmt.Errorf("Eth-Consensus-Version header is required") + return 0, errors.New("Eth-Consensus-Version header is required") } if str == "" && apiVersion == 1 { currentEpoch := a.ethClock.GetCurrentEpoch() @@ -931,7 +931,7 @@ func (a *ApiHandler) parseRequestBeaconBlock( block.SignedBlock.Block.SetVersion(version) return block, nil } - return nil, fmt.Errorf("invalid content type") + return nil, errors.New("invalid content type") } func (a *ApiHandler) broadcastBlock(ctx context.Context, blk *cltypes.SignedBeaconBlock) error { diff --git a/cl/beacon/handler/blocks.go b/cl/beacon/handler/blocks.go index 32c1e99d84f..59926604d73 
100644 --- a/cl/beacon/handler/blocks.go +++ b/cl/beacon/handler/blocks.go @@ -18,6 +18,7 @@ package handler import ( "context" + "errors" "fmt" "net/http" @@ -56,7 +57,7 @@ func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *bea return libcommon.Hash{}, err } if root == (libcommon.Hash{}) { - return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("genesis block not found")) + return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("genesis block not found")) } case blockId.GetSlot() != nil: root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *blockId.GetSlot()) @@ -70,7 +71,7 @@ func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *bea // first check if it exists root = *blockId.GetRoot() default: - return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusInternalServerError, fmt.Errorf("cannot parse block id")) + return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusInternalServerError, errors.New("cannot parse block id")) } return } diff --git a/cl/beacon/handler/builder.go b/cl/beacon/handler/builder.go index 9f7a6e0e1da..649f58dbd4c 100644 --- a/cl/beacon/handler/builder.go +++ b/cl/beacon/handler/builder.go @@ -18,7 +18,7 @@ package handler import ( "encoding/json" - "fmt" + "errors" "net/http" libcommon "github.com/erigontech/erigon-lib/common" @@ -53,17 +53,17 @@ func (a *ApiHandler) GetEth1V1BuilderStatesExpectedWithdrawals(w http.ResponseWr return nil, err } if slot == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("state not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("state not found")) } if a.beaconChainCfg.GetCurrentStateVersion(*slot/a.beaconChainCfg.SlotsPerEpoch) < clparams.CapellaVersion { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("the specified state is not a capella state")) + return nil, 
beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("the specified state is not a capella state")) } headRoot, _, err := a.forkchoiceStore.GetHead() if err != nil { return nil, err } if a.syncedData.Syncing() { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("beacon node is syncing")) + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("beacon node is syncing")) } if root == headRoot { return newBeaconResponse(state.ExpectedWithdrawals(a.syncedData.HeadState(), state.Epoch(a.syncedData.HeadState()))).WithFinalized(false), nil @@ -71,7 +71,7 @@ func (a *ApiHandler) GetEth1V1BuilderStatesExpectedWithdrawals(w http.ResponseWr lookAhead := 1024 for currSlot := *slot + 1; currSlot < *slot+uint64(lookAhead); currSlot++ { if currSlot > a.syncedData.HeadSlot() { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("state not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("state not found")) } blockRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, currSlot) if err != nil { @@ -87,7 +87,7 @@ func (a *ApiHandler) GetEth1V1BuilderStatesExpectedWithdrawals(w http.ResponseWr return newBeaconResponse(blk.Block.Body.ExecutionPayload.Withdrawals).WithFinalized(false).WithOptimistic(isOptimistic), nil } - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("state not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("state not found")) } func (a *ApiHandler) PostEthV1BuilderRegisterValidator(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { @@ -96,7 +96,7 @@ func (a *ApiHandler) PostEthV1BuilderRegisterValidator(w http.ResponseWriter, r return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err) } if len(registerReq) == 0 { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("empty request")) + return nil, 
beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("empty request")) } if err := a.builderClient.RegisterValidator(r.Context(), registerReq); err != nil { return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, err) diff --git a/cl/beacon/handler/committees.go b/cl/beacon/handler/committees.go index 7eb38c20a6e..3866d31e9a3 100644 --- a/cl/beacon/handler/committees.go +++ b/cl/beacon/handler/committees.go @@ -17,6 +17,7 @@ package handler import ( + "errors" "fmt" "net/http" "strconv" @@ -89,7 +90,7 @@ func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*bea // non-finality case s := a.syncedData.HeadState() if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("node is syncing")) + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing")) } if epoch > state.Epoch(s)+1 { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch)) diff --git a/cl/beacon/handler/duties_attester.go b/cl/beacon/handler/duties_attester.go index 89aac15efbb..b6804596f3c 100644 --- a/cl/beacon/handler/duties_attester.go +++ b/cl/beacon/handler/duties_attester.go @@ -18,6 +18,7 @@ package handler import ( "encoding/json" + "errors" "fmt" "net/http" "strconv" @@ -63,7 +64,7 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) ( } s := a.syncedData.HeadState() if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("node is syncing")) + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing")) } dependentRoot := a.getDependentRoot(s, epoch) @@ -101,7 +102,7 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) ( // non-finality case if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("node is syncing")) + return 
nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing")) } if epoch > state.Epoch(s)+3 { diff --git a/cl/beacon/handler/duties_proposer.go b/cl/beacon/handler/duties_proposer.go index e71a5c1fd2b..6c513ff551b 100644 --- a/cl/beacon/handler/duties_proposer.go +++ b/cl/beacon/handler/duties_proposer.go @@ -19,7 +19,7 @@ package handler import ( "crypto/sha256" "encoding/binary" - "fmt" + "errors" "net/http" "sync" @@ -45,7 +45,7 @@ func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) ( } s := a.syncedData.HeadState() if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("node is syncing")) + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing")) } dependentRoot := a.getDependentRoot(s, epoch) if epoch < a.forkchoiceStore.FinalizedCheckpoint().Epoch() { @@ -60,7 +60,7 @@ func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) ( return nil, err } if len(indiciesBytes) != int(a.beaconChainCfg.SlotsPerEpoch*4) { - return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, fmt.Errorf("proposer duties is corrupted")) + return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, errors.New("proposer duties is corrupted")) } duties := make([]proposerDuties, a.beaconChainCfg.SlotsPerEpoch) for i := uint64(0); i < a.beaconChainCfg.SlotsPerEpoch; i++ { @@ -86,7 +86,7 @@ func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) ( // We need to compute our duties state := a.syncedData.HeadState() if state == nil { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("beacon node is syncing")) + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("beacon node is syncing")) } diff --git a/cl/beacon/handler/events.go b/cl/beacon/handler/events.go index 6ca1448ac6b..e9afe593e6a 100644 --- 
a/cl/beacon/handler/events.go +++ b/cl/beacon/handler/events.go @@ -19,7 +19,6 @@ package handler import ( "bytes" "encoding/json" - "fmt" "net/http" "sync" @@ -51,7 +50,7 @@ func (a *ApiHandler) EventSourceGetV1Events(w http.ResponseWriter, r *http.Reque topics := r.URL.Query()["topics"] for _, v := range topics { if _, ok := validTopics[v]; !ok { - http.Error(w, fmt.Sprintf("invalid Topic: %s", v), http.StatusBadRequest) + http.Error(w, "invalid Topic: "+v, http.StatusBadRequest) } } var mu sync.Mutex diff --git a/cl/beacon/handler/forkchoice.go b/cl/beacon/handler/forkchoice.go index e5cbfe34687..2563f79d52b 100644 --- a/cl/beacon/handler/forkchoice.go +++ b/cl/beacon/handler/forkchoice.go @@ -18,7 +18,7 @@ package handler import ( "encoding/json" - "fmt" + "errors" "net/http" "strconv" @@ -27,7 +27,7 @@ import ( func (a *ApiHandler) GetEthV2DebugBeaconHeads(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { if a.syncedData.Syncing() { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("beacon node is syncing")) + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("beacon node is syncing")) } hash, slotNumber, err := a.forkchoiceStore.GetHead() if err != nil { diff --git a/cl/beacon/handler/lightclient.go b/cl/beacon/handler/lightclient.go index f14a58814b3..82c36931632 100644 --- a/cl/beacon/handler/lightclient.go +++ b/cl/beacon/handler/lightclient.go @@ -18,7 +18,7 @@ package handler import ( "encoding/json" - "fmt" + "errors" "net/http" "github.com/erigontech/erigon/cl/beacon/beaconhttp" @@ -45,7 +45,7 @@ func (a *ApiHandler) GetEthV1BeaconLightClientBootstrap(w http.ResponseWriter, r bootstrap, ok := a.forkchoiceStore.GetLightClientBootstrap(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("bootstrap object evicted")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("bootstrap object evicted")) } 
return newBeaconResponse(bootstrap).WithVersion(bootstrap.Header.Version()), nil } @@ -53,7 +53,7 @@ func (a *ApiHandler) GetEthV1BeaconLightClientBootstrap(w http.ResponseWriter, r func (a *ApiHandler) GetEthV1BeaconLightClientOptimisticUpdate(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { update := a.forkchoiceStore.NewestLightClientUpdate() if update == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no optimistic update loaded yet, try again later. it may take a few minutes for it to load.")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no optimistic update loaded yet, try again later. it may take a few minutes for it to load.")) } version := update.AttestedHeader.Version() return newBeaconResponse(&cltypes.LightClientOptimisticUpdate{ @@ -66,7 +66,7 @@ func (a *ApiHandler) GetEthV1BeaconLightClientOptimisticUpdate(w http.ResponseWr func (a *ApiHandler) GetEthV1BeaconLightClientFinalityUpdate(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { update := a.forkchoiceStore.NewestLightClientUpdate() if update == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no finility update loaded yet, try again later. it may take a few minutes for it to load.")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no finality update loaded yet, try again later. 
it may take a few minutes for it to load.")) } version := update.AttestedHeader.Version() return newBeaconResponse(&cltypes.LightClientFinalityUpdate{ diff --git a/cl/beacon/handler/lighthouse.go b/cl/beacon/handler/lighthouse.go index c120fcd4f48..201c98c1c6d 100644 --- a/cl/beacon/handler/lighthouse.go +++ b/cl/beacon/handler/lighthouse.go @@ -17,7 +17,7 @@ package handler import ( - "fmt" + "errors" "net/http" "github.com/erigontech/erigon-lib/common" @@ -84,32 +84,32 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter } activeBalance, ok := a.forkchoiceStore.TotalActiveBalance(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("active balance not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("active balance not found for current epoch")) } prevActiveBalance, ok := a.forkchoiceStore.TotalActiveBalance(prevRoot) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("active balance not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("active balance not found for previous epoch")) } validatorSet, err := a.forkchoiceStore.GetValidatorSet(root) if err != nil { return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator set not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } currentEpochPartecipation, err := a.forkchoiceStore.GetCurrentPartecipationIndicies(root) if err != nil { return nil, err } if currentEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("partecipation not found for current epoch")) } 
previousEpochPartecipation, err := a.forkchoiceStore.GetPreviousPartecipationIndicies(root) if err != nil { return nil, err } if previousEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("partecipation not found for previous epoch")) } return newBeaconResponse(a.computeLighthouseValidatorInclusionGlobal(epoch, activeBalance, prevActiveBalance, validatorSet, currentEpochPartecipation, previousEpochPartecipation)), nil } @@ -120,14 +120,14 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter return nil, err } if epochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch data not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for current epoch")) } prevEpochData, err := state_accessors.ReadEpochData(tx, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if prevEpochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch data not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for previous epoch")) } // read the validator set validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, slot) @@ -135,17 +135,17 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator set not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } currentEpochPartecipation, previousEpochPartecipation, err := a.stateReader.ReadPartecipations(tx, 
slot+(a.beaconChainCfg.SlotsPerEpoch-1)) if err != nil { return nil, err } if currentEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("partecipation not found for current epoch")) } if previousEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("partecipation not found for previous epoch")) } return newBeaconResponse(a.computeLighthouseValidatorInclusionGlobal(epoch, epochData.TotalActiveBalance, prevEpochData.TotalActiveBalance, validatorSet, currentEpochPartecipation, previousEpochPartecipation)), nil } @@ -242,32 +242,32 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h } activeBalance, ok := a.forkchoiceStore.TotalActiveBalance(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("active balance not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("active balance not found for current epoch")) } prevActiveBalance, ok := a.forkchoiceStore.TotalActiveBalance(prevRoot) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("active balance not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("active balance not found for previous epoch")) } validatorSet, err := a.forkchoiceStore.GetValidatorSet(root) if err != nil { return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator set not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } currentEpochPartecipation, err := 
a.forkchoiceStore.GetCurrentPartecipationIndicies(root) if err != nil { return nil, err } if currentEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("partecipation not found for current epoch")) } previousEpochPartecipation, err := a.forkchoiceStore.GetPreviousPartecipationIndicies(root) if err != nil { return nil, err } if previousEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("partecipation not found for previous epoch")) } return newBeaconResponse(a.computeLighthouseValidatorInclusion(int(validatorIndex), prevEpoch, epoch, activeBalance, prevActiveBalance, validatorSet, currentEpochPartecipation, previousEpochPartecipation)), nil } @@ -278,14 +278,14 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h return nil, err } if epochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch data not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for current epoch")) } prevEpochData, err := state_accessors.ReadEpochData(tx, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if prevEpochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch data not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for previous epoch")) } // read the validator set validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, slot) @@ -293,17 +293,17 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h return nil, err } if 
validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator set not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } currentEpochPartecipation, previousEpochPartecipation, err := a.stateReader.ReadPartecipations(tx, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) if err != nil { return nil, err } if currentEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("partecipation not found for current epoch")) } if previousEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("partecipation not found for previous epoch")) } return newBeaconResponse(a.computeLighthouseValidatorInclusion(int(validatorIndex), prevEpoch, epoch, epochData.TotalActiveBalance, prevEpochData.TotalActiveBalance, validatorSet, currentEpochPartecipation, previousEpochPartecipation)), nil } diff --git a/cl/beacon/handler/rewards.go b/cl/beacon/handler/rewards.go index 01c904d9be0..398a4565f46 100644 --- a/cl/beacon/handler/rewards.go +++ b/cl/beacon/handler/rewards.go @@ -18,7 +18,7 @@ package handler import ( "encoding/json" - "fmt" + "errors" "io" "net/http" "sort" @@ -62,7 +62,7 @@ func (a *ApiHandler) GetEthV1BeaconRewardsBlocks(w http.ResponseWriter, r *http. 
return nil, err } if blk == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("block not found")) } slot := blk.Header.Slot isFinalized := slot <= a.forkchoiceStore.FinalizedSlot() @@ -70,7 +70,7 @@ func (a *ApiHandler) GetEthV1BeaconRewardsBlocks(w http.ResponseWriter, r *http. // finalized case blkRewards, ok := a.forkchoiceStore.BlockRewards(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("block not found")) } return newBeaconResponse(blockRewardsResponse{ ProposerIndex: blk.Header.ProposerIndex, @@ -86,7 +86,7 @@ func (a *ApiHandler) GetEthV1BeaconRewardsBlocks(w http.ResponseWriter, r *http. return nil, err } if slotData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read historical block rewards, node may not be archive or it still processing historical states")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("could not read historical block rewards, node may not be archive or it still processing historical states")) } return newBeaconResponse(blockRewardsResponse{ ProposerIndex: blk.Header.ProposerIndex, @@ -142,12 +142,12 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, return nil, err } if blk == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("block not found")) } slot := blk.Block.Slot version := a.beaconChainCfg.GetCurrentStateVersion(blk.Block.Slot / a.beaconChainCfg.SlotsPerEpoch) if version < clparams.AltairVersion { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("sync committee rewards not available before Altair fork")) + return nil, 
beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("sync committee rewards not available before Altair fork")) } // retrieve the state we need ----------------------------------------------- // We need: @@ -167,14 +167,14 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, ) if isFinalized { if !isCanonical { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("non-canonical finalized block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("non-canonical finalized block not found")) } epochData, err := state_accessors.ReadEpochData(tx, blk.Block.Slot) if err != nil { return nil, err } if epochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read historical sync committee rewards, node may not be archive or it still processing historical states")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("could not read historical sync committee rewards, node may not be archive or it still processing historical states")) } totalActiveBalance = epochData.TotalActiveBalance syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(blk.Block.Slot)) @@ -182,17 +182,17 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, return nil, err } if syncCommittee == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read historical sync committee, node may not be archive or it still processing historical states")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("could not read historical sync committee, node may not be archive or it still processing historical states")) } } else { var ok bool syncCommittee, _, ok = a.forkchoiceStore.GetSyncCommittees(a.beaconChainCfg.SyncCommitteePeriod(slot)) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, 
fmt.Errorf("non-finalized sync committee not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("non-finalized sync committee not found")) } totalActiveBalance, ok = a.forkchoiceStore.TotalActiveBalance(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("non-finalized total active balance not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("non-finalized total active balance not found")) } } committee := syncCommittee.GetCommittee() @@ -217,7 +217,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, return nil, err } if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("sync committee public key not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("sync committee public key not found")) } if len(filterIndiciesSet) > 0 { if _, ok := filterIndiciesSet[idx]; !ok { diff --git a/cl/beacon/handler/states.go b/cl/beacon/handler/states.go index 2654fb4ee80..17c0d4662c8 100644 --- a/cl/beacon/handler/states.go +++ b/cl/beacon/handler/states.go @@ -18,6 +18,7 @@ package handler import ( "context" + "errors" "fmt" "net/http" "strconv" @@ -53,7 +54,7 @@ func (a *ApiHandler) blockRootFromStateId(ctx context.Context, tx kv.Tx, stateId return libcommon.Hash{}, http.StatusInternalServerError, err } if root == (libcommon.Hash{}) { - return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("genesis block not found") + return libcommon.Hash{}, http.StatusNotFound, errors.New("genesis block not found") } return case stateId.GetSlot() != nil: @@ -72,7 +73,7 @@ func (a *ApiHandler) blockRootFromStateId(ctx context.Context, tx kv.Tx, stateId } return default: - return libcommon.Hash{}, http.StatusInternalServerError, fmt.Errorf("cannot parse state id") + return libcommon.Hash{}, http.StatusInternalServerError, errors.New("cannot parse state id") } } @@ -345,7 +346,7 @@ func (a *ApiHandler) 
getSyncCommittees(w http.ResponseWriter, r *http.Request) ( if requestPeriod == statePeriod+1 { committee = nextSyncCommittee.GetCommittee() } else if requestPeriod != statePeriod { - return nil, fmt.Errorf("epoch is outside the sync committee period of the state") + return nil, errors.New("epoch is outside the sync committee period of the state") } } // Lastly construct the response diff --git a/cl/beacon/handler/subscription.go b/cl/beacon/handler/subscription.go index 9eb1e5e4fa2..d8f9e652aa0 100644 --- a/cl/beacon/handler/subscription.go +++ b/cl/beacon/handler/subscription.go @@ -19,6 +19,7 @@ package handler import ( "context" "encoding/json" + "errors" "fmt" "net/http" "strconv" @@ -118,15 +119,15 @@ func parseSyncCommitteeContribution(r *http.Request) (slot, subcommitteeIndex ui blockRootStr := r.URL.Query().Get("beacon_block_root") // check if they required fields are present if slotStr == "" { - err = fmt.Errorf("slot as query param is required") + err = errors.New("slot as query param is required") return } if subCommitteeIndexStr == "" { - err = fmt.Errorf("subcommittee_index as query param is required") + err = errors.New("subcommittee_index as query param is required") return } if blockRootStr == "" { - err = fmt.Errorf("beacon_block_root as query param is required") + err = errors.New("beacon_block_root as query param is required") return } slot, err = strconv.ParseUint(slotStr, 10, 64) diff --git a/cl/beacon/handler/validators.go b/cl/beacon/handler/validators.go index a1f2b51d149..2616da0db54 100644 --- a/cl/beacon/handler/validators.go +++ b/cl/beacon/handler/validators.go @@ -180,7 +180,7 @@ func parseStatuses(s []string) ([]validatorStatus, error) { statuses := make([]validatorStatus, 0, len(s)) if len(s) > maxValidatorsLookupFilter { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("too many statuses requested")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("too many statuses 
requested")) } for _, status := range s { @@ -208,7 +208,7 @@ func checkValidValidatorId(s string) (bool, error) { } // If it is not 0x prefixed, then it must be a number, check if it is a base-10 number if _, err := strconv.ParseUint(s, 10, 64); err != nil { - return false, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("invalid validator id")) + return false, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("invalid validator id")) } return false, nil } @@ -248,7 +248,7 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidators(w http.ResponseWriter, r *ht } if len(validatorIds) > maxValidatorsLookupFilter { - http.Error(w, fmt.Errorf("too many validators requested").Error(), http.StatusBadRequest) + http.Error(w, errors.New("too many validators requested").Error(), http.StatusBadRequest) return } a.writeValidatorsResponse(w, r, tx, blockId, blockRoot, validatorIds, queryFilters) @@ -288,7 +288,7 @@ func (a *ApiHandler) PostEthV1BeaconStatesValidators(w http.ResponseWriter, r *h } if len(req.Ids) > maxValidatorsLookupFilter { - http.Error(w, fmt.Errorf("too many validators requested").Error(), http.StatusBadRequest) + http.Error(w, errors.New("too many validators requested").Error(), http.StatusBadRequest) return } @@ -320,7 +320,7 @@ func (a *ApiHandler) writeValidatorsResponse( if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. 
s := a.syncedData.HeadState() if s == nil { - http.Error(w, fmt.Errorf("node is not synced").Error(), http.StatusServiceUnavailable) + http.Error(w, errors.New("node is not synced").Error(), http.StatusServiceUnavailable) return } responseValidators(w, filterIndicies, statusFilters, state.Epoch(s), s.Balances(), s.Validators(), false, isOptimistic) @@ -333,7 +333,7 @@ func (a *ApiHandler) writeValidatorsResponse( } if slot == nil { - http.Error(w, fmt.Errorf("state not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("state not found").Error(), http.StatusNotFound) return } stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch @@ -361,7 +361,7 @@ func (a *ApiHandler) writeValidatorsResponse( return } if balances == nil { - http.Error(w, fmt.Errorf("balances not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("balances not found").Error(), http.StatusNotFound) return } validators, err := a.forkchoiceStore.GetValidatorSet(blockRoot) @@ -370,7 +370,7 @@ func (a *ApiHandler) writeValidatorsResponse( return } if validators == nil { - http.Error(w, fmt.Errorf("validators not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("validators not found").Error(), http.StatusNotFound) return } responseValidators(w, filterIndicies, statusFilters, stateEpoch, balances, validators, *slot <= a.forkchoiceStore.FinalizedSlot(), isOptimistic) @@ -398,7 +398,7 @@ func parseQueryValidatorIndex(tx kv.Tx, id string) (uint64, error) { return 0, err } if !ok { - return 0, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator not found")) + return 0, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator not found")) } return idx, nil } @@ -457,13 +457,13 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. 
s := a.syncedData.HeadState() if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("node is not synced")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("node is not synced")) } if s.ValidatorLength() <= int(validatorIndex) { return newBeaconResponse([]int{}).WithFinalized(false), nil } if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("node is not synced")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("node is not synced")) } return responseValidator(validatorIndex, state.Epoch(s), s.Balances(), s.Validators(), false, isOptimistic) } @@ -473,7 +473,7 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt } if slot == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("state not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("state not found")) } stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch @@ -493,14 +493,14 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt return nil, err } if balances == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("balances not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("balances not found")) } validators, err := a.forkchoiceStore.GetValidatorSet(blockRoot) if err != nil { return nil, err } if validators == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validators not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validators not found")) } return responseValidator(validatorIndex, stateEpoch, balances, validators, *slot <= a.forkchoiceStore.FinalizedSlot(), isOptimistic) } @@ -521,7 +521,7 @@ func (a *ApiHandler) PostEthV1BeaconValidatorsBalances(w http.ResponseWriter, r } if len(validatorIds) > maxValidatorsLookupFilter { - http.Error(w, 
fmt.Errorf("too many validators requested").Error(), http.StatusBadRequest) + http.Error(w, errors.New("too many validators requested").Error(), http.StatusBadRequest) return } @@ -543,7 +543,7 @@ func (a *ApiHandler) GetEthV1BeaconValidatorsBalances(w http.ResponseWriter, r * } if len(validatorIds) > maxValidatorsLookupFilter { - http.Error(w, fmt.Errorf("too many validators requested").Error(), http.StatusBadRequest) + http.Error(w, errors.New("too many validators requested").Error(), http.StatusBadRequest) return } a.getValidatorBalances(r.Context(), w, blockId, validatorIds) @@ -574,7 +574,7 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. s := a.syncedData.HeadState() if s == nil { - http.Error(w, fmt.Errorf("node is not synced").Error(), http.StatusServiceUnavailable) + http.Error(w, errors.New("node is not synced").Error(), http.StatusServiceUnavailable) return } responseValidatorsBalances(w, filterIndicies, s.Balances(), false, isOptimistic) @@ -587,7 +587,7 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr } if slot == nil { - http.Error(w, fmt.Errorf("state not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("state not found").Error(), http.StatusNotFound) return } @@ -599,7 +599,7 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr } if balances == nil { - http.Error(w, fmt.Errorf("validators not found, node may node be running in archivial node").Error(), http.StatusNotFound) + http.Error(w, errors.New("validators not found, node may not be running in archival mode").Error(), http.StatusNotFound) } responseValidatorsBalances(w, filterIndicies, balances, true, isOptimistic) return @@ -610,7 +610,7 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr return } if balances == nil { - 
http.Error(w, fmt.Errorf("balances not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("balances not found").Error(), http.StatusNotFound) return } responseValidatorsBalances(w, filterIndicies, balances, *slot <= a.forkchoiceStore.FinalizedSlot(), isOptimistic) @@ -692,7 +692,7 @@ func responseValidator(idx uint64, stateEpoch uint64, balances solid.Uint64ListS return newBeaconResponse([]int{}).WithFinalized(finalized), nil } if idx >= uint64(validators.Length()) { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator not found")) } v := validators.Get(int(idx)) @@ -772,11 +772,11 @@ func shouldStatusBeFiltered(status validatorStatus, statuses []validatorStatus) func (a *ApiHandler) GetEthV1ValidatorAggregateAttestation(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { attDataRoot := r.URL.Query().Get("attestation_data_root") if attDataRoot == "" { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("attestation_data_root is required")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("attestation_data_root is required")) } slot := r.URL.Query().Get("slot") if slot == "" { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("slot is required")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("slot is required")) } slotNum, err := strconv.ParseUint(slot, 10, 64) if err != nil { @@ -790,7 +790,7 @@ func (a *ApiHandler) GetEthV1ValidatorAggregateAttestation(w http.ResponseWriter } if slotNum != att.AttestantionData().Slot() { log.Debug("attestation slot does not match", "attestation_data_root", attDataRoot, "slot_inquire", slot) - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("attestation slot mismatch")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, 
errors.New("attestation slot mismatch")) } return newBeaconResponse(att), nil diff --git a/cl/clparams/config.go b/cl/clparams/config.go index 0aacad2c0b1..596168d0024 100644 --- a/cl/clparams/config.go +++ b/cl/clparams/config.go @@ -18,12 +18,14 @@ package clparams import ( "crypto/rand" + "errors" "fmt" "math" "math/big" mathrand "math/rand" "os" "path" + "strconv" "time" "gopkg.in/yaml.v2" @@ -1032,7 +1034,7 @@ func GetConfigsByNetworkName(net string) (*NetworkConfig, *BeaconChainConfig, Ne networkCfg, beaconCfg := GetConfigsByNetwork(HoleskyNetwork) return networkCfg, beaconCfg, HoleskyNetwork, nil default: - return nil, nil, MainnetNetwork, fmt.Errorf("chain not found") + return nil, nil, MainnetNetwork, errors.New("chain not found") } } @@ -1112,6 +1114,6 @@ func SupportBackfilling(networkId uint64) bool { } func EpochToPaths(slot uint64, config *BeaconChainConfig, suffix string) (string, string) { - folderPath := path.Clean(fmt.Sprintf("%d", slot/SubDivisionFolderSize)) + folderPath := path.Clean(strconv.FormatUint(slot/SubDivisionFolderSize, 10)) return folderPath, path.Clean(fmt.Sprintf("%s/%d.%s.sz", folderPath, slot, suffix)) } diff --git a/cl/cltypes/beacon_block.go b/cl/cltypes/beacon_block.go index aa8a9f8f063..1c8cd4aed4c 100644 --- a/cl/cltypes/beacon_block.go +++ b/cl/cltypes/beacon_block.go @@ -18,6 +18,7 @@ package cltypes import ( "encoding/json" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -369,7 +370,7 @@ func (b *BeaconBody) ExecutionPayloadMerkleProof() ([][32]byte, error) { func (b *BeaconBody) KzgCommitmentMerkleProof(index int) ([][32]byte, error) { if index >= b.BlobKzgCommitments.Len() { - return nil, fmt.Errorf("index out of range") + return nil, errors.New("index out of range") } kzgCommitmentsProof, err := merkle_tree.MerkleProof(4, 11, b.getSchema(false)...) 
if err != nil { diff --git a/cl/cltypes/beacon_block_blinded.go b/cl/cltypes/beacon_block_blinded.go index c9adcbad6e2..af610d90684 100644 --- a/cl/cltypes/beacon_block_blinded.go +++ b/cl/cltypes/beacon_block_blinded.go @@ -17,6 +17,7 @@ package cltypes import ( + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -76,7 +77,7 @@ func (b *SignedBlindedBeaconBlock) Clone() clonable.Clonable { func (b *SignedBlindedBeaconBlock) Unblind(blockPayload *Eth1Block) (*SignedBeaconBlock, error) { if b == nil { - return nil, fmt.Errorf("nil block") + return nil, errors.New("nil block") } // check root blindedRoot := b.Block.Body.ExecutionPayload.StateRoot diff --git a/cl/gossip/gossip.go b/cl/gossip/gossip.go index 773c5e69d42..cd1fd0e2b39 100644 --- a/cl/gossip/gossip.go +++ b/cl/gossip/gossip.go @@ -17,6 +17,7 @@ package gossip import ( + "errors" "fmt" "strings" ) @@ -63,7 +64,7 @@ func IsTopicBeaconAttestation(d string) bool { func SubnetIdFromTopicBeaconAttestation(d string) (uint64, error) { if !IsTopicBeaconAttestation(d) { - return 0, fmt.Errorf("not a beacon attestation topic") + return 0, errors.New("not a beacon attestation topic") } var id uint64 _, err := fmt.Sscanf(d, TopicNamePrefixBeaconAttestation, &id) diff --git a/cl/persistence/base_encoding/uint64_diff.go b/cl/persistence/base_encoding/uint64_diff.go index a3aaec64efe..bc920f7cd70 100644 --- a/cl/persistence/base_encoding/uint64_diff.go +++ b/cl/persistence/base_encoding/uint64_diff.go @@ -72,7 +72,7 @@ type repeatedPatternEntry struct { func ComputeCompressedSerializedUint64ListDiff(w io.Writer, old, new []byte) error { if len(old) > len(new) { - return fmt.Errorf("old list is longer than new list") + return errors.New("old list is longer than new list") } compressor := compressorPool.Get().(*zstd.Encoder) @@ -136,7 +136,7 @@ func ComputeCompressedSerializedUint64ListDiff(w io.Writer, old, new []byte) err func ComputeCompressedSerializedEffectiveBalancesDiff(w io.Writer, old, new 
[]byte) error { if len(old) > len(new) { - return fmt.Errorf("old list is longer than new list") + return errors.New("old list is longer than new list") } compressor := compressorPool.Get().(*zstd.Encoder) @@ -264,7 +264,7 @@ func ApplyCompressedSerializedUint64ListDiff(in, out []byte, diff []byte, revers func ComputeCompressedSerializedValidatorSetListDiff(w io.Writer, old, new []byte) error { if len(old) > len(new) { - return fmt.Errorf("old list is longer than new list") + return errors.New("old list is longer than new list") } validatorLength := 121 diff --git a/cl/persistence/blob_storage/blob_db.go b/cl/persistence/blob_storage/blob_db.go index c3cc2593909..74ef592df38 100644 --- a/cl/persistence/blob_storage/blob_db.go +++ b/cl/persistence/blob_storage/blob_db.go @@ -236,7 +236,7 @@ func VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx context.Context, stor return 0, 0, nil } if len(sidecars) > identifiers.Len() { - return 0, 0, fmt.Errorf("sidecars length is greater than identifiers length") + return 0, 0, errors.New("sidecars length is greater than identifiers length") } prevBlockRoot := identifiers.Get(0).BlockRoot totalProcessed := 0 @@ -261,7 +261,7 @@ func VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx context.Context, stor } if !cltypes.VerifyCommitmentInclusionProof(sidecar.KzgCommitment, sidecar.CommitmentInclusionProof, sidecar.Index, clparams.DenebVersion, sidecar.SignedBlockHeader.Header.BodyRoot) { - return 0, 0, fmt.Errorf("could not verify blob's inclusion proof") + return 0, 0, errors.New("could not verify blob's inclusion proof") } if verifySignatureFn != nil { // verify the signature of the sidecar head, we leave this step up to the caller to define @@ -305,7 +305,7 @@ func VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx context.Context, stor kzgProofs[i] = gokzg4844.KZGProof(sidecar.KzgProof) } if err := kzgCtx.VerifyBlobKZGProofBatch(blobs, kzgCommitments, kzgProofs); err != nil { - errAtomic.Store(fmt.Errorf("sidecar is 
wrong")) + errAtomic.Store(errors.New("sidecar is wrong")) return } if err := storage.WriteBlobSidecars(ctx, sds.blockRoot, sds.sidecars); err != nil { diff --git a/cl/persistence/state/historical_states_reader/attesting_indicies.go b/cl/persistence/state/historical_states_reader/attesting_indicies.go index 1a63f09e121..4ac8ded610a 100644 --- a/cl/persistence/state/historical_states_reader/attesting_indicies.go +++ b/cl/persistence/state/historical_states_reader/attesting_indicies.go @@ -17,6 +17,7 @@ package historical_states_reader import ( + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -50,7 +51,7 @@ func (r *HistoricalStatesReader) attestingIndicies(attestation solid.Attestation bitIndex := i % 8 sliceIndex := i / 8 if sliceIndex >= len(aggregationBits) { - return nil, fmt.Errorf("GetAttestingIndicies: committee is too big") + return nil, errors.New("GetAttestingIndicies: committee is too big") } if (aggregationBits[sliceIndex] & (1 << bitIndex)) > 0 { attestingIndices = append(attestingIndices, member) diff --git a/cl/persistence/state/static_validator_table.go b/cl/persistence/state/static_validator_table.go index 4bc10abe985..2cdbbfdfc9a 100644 --- a/cl/persistence/state/static_validator_table.go +++ b/cl/persistence/state/static_validator_table.go @@ -17,7 +17,7 @@ package state_accessors import ( - "fmt" + "errors" "io" "sync" @@ -292,7 +292,7 @@ func (s *StaticValidatorTable) AddValidator(v solid.Validator, validatorIndex, s } s.validatorTable = append(s.validatorTable, NewStaticValidatorFromValidator(v, slot)) if validatorIndex != uint64(len(s.validatorTable))-1 { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } return nil } @@ -304,7 +304,7 @@ func (s *StaticValidatorTable) AddWithdrawalCredentials(validatorIndex, slot uin return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index 
mismatch") } s.validatorTable[validatorIndex].AddWithdrawalCredentials(slot, withdrawalCredentials) return nil @@ -317,7 +317,7 @@ func (s *StaticValidatorTable) AddSlashed(validatorIndex, slot uint64, slashed b return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddSlashed(slot, slashed) return nil @@ -330,7 +330,7 @@ func (s *StaticValidatorTable) AddActivationEligibility(validatorIndex, slot uin return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddActivationEligibility(slot, activationEligibility) return nil @@ -343,7 +343,7 @@ func (s *StaticValidatorTable) AddActivationEpoch(validatorIndex, slot uint64, a return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddActivationEpoch(slot, activationEpoch) return nil @@ -356,7 +356,7 @@ func (s *StaticValidatorTable) AddExitEpoch(validatorIndex, slot uint64, exitEpo return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddExitEpoch(slot, exitEpoch) return nil @@ -369,7 +369,7 @@ func (s *StaticValidatorTable) AddWithdrawableEpoch(validatorIndex, slot uint64, return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddWithdrawableEpoch(slot, withdrawableEpoch) return nil diff --git a/cl/phase1/core/checkpoint.go b/cl/phase1/core/checkpoint.go index 1a52a6cd932..82bc38142b6 100644 --- a/cl/phase1/core/checkpoint.go 
+++ b/cl/phase1/core/checkpoint.go @@ -19,6 +19,7 @@ package core import ( "context" "encoding/binary" + "errors" "fmt" "io" "net/http" @@ -33,7 +34,7 @@ import ( func extractSlotFromSerializedBeaconState(beaconState []byte) (uint64, error) { if len(beaconState) < 48 { - return 0, fmt.Errorf("checkpoint sync read failed, too short") + return 0, errors.New("checkpoint sync read failed, too short") } return binary.LittleEndian.Uint64(beaconState[40:48]), nil } @@ -41,7 +42,7 @@ func extractSlotFromSerializedBeaconState(beaconState []byte) (uint64, error) { func RetrieveBeaconState(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, net clparams.NetworkType) (*state.CachingBeaconState, error) { uris := clparams.GetAllCheckpointSyncEndpoints(net) if len(uris) == 0 { - return nil, fmt.Errorf("no uris for checkpoint sync") + return nil, errors.New("no uris for checkpoint sync") } fetchBeaconState := func(uri string) (*state.CachingBeaconState, error) { @@ -122,7 +123,7 @@ func RetrieveBlock(ctx context.Context, beaconConfig *clparams.BeaconChainConfig return nil, fmt.Errorf("checkpoint sync read failed %s", err) } if len(marshaled) < 108 { - return nil, fmt.Errorf("checkpoint sync read failed, too short") + return nil, errors.New("checkpoint sync read failed, too short") } currentSlot := binary.LittleEndian.Uint64(marshaled[100:108]) v := beaconConfig.GetCurrentStateVersion(currentSlot / beaconConfig.SlotsPerEpoch) diff --git a/cl/phase1/core/state/accessors.go b/cl/phase1/core/state/accessors.go index 47198f38fe4..95021de1484 100644 --- a/cl/phase1/core/state/accessors.go +++ b/cl/phase1/core/state/accessors.go @@ -18,6 +18,7 @@ package state import ( "encoding/binary" + "errors" "fmt" "github.com/Giulio2002/bls" @@ -128,7 +129,7 @@ func EligibleValidatorsIndicies(b abstract.BeaconState) (eligibleValidators []ui func IsValidIndexedAttestation(b abstract.BeaconStateBasic, att *cltypes.IndexedAttestation) (bool, error) { inds := att.AttestingIndices if 
inds.Length() == 0 || !solid.IsUint64SortedSet(inds) { - return false, fmt.Errorf("isValidIndexedAttestation: attesting indices are not sorted or are null") + return false, errors.New("isValidIndexedAttestation: attesting indices are not sorted or are null") } pks := make([][]byte, 0, inds.Length()) @@ -159,7 +160,7 @@ func IsValidIndexedAttestation(b abstract.BeaconStateBasic, att *cltypes.Indexed return false, fmt.Errorf("error while validating signature: %v", err) } if !valid { - return false, fmt.Errorf("invalid aggregate signature") + return false, errors.New("invalid aggregate signature") } return true, nil } @@ -174,7 +175,7 @@ func GetUnslashedParticipatingIndices(b abstract.BeaconState, flagIndex int, epo case PreviousEpoch(b): participation = b.EpochParticipation(false) default: - return nil, fmt.Errorf("getUnslashedParticipatingIndices: only epoch and previous epoch can be used") + return nil, errors.New("getUnslashedParticipatingIndices: only epoch and previous epoch can be used") } // Iterate over all validators and include the active ones that have flag_index enabled and are not slashed. 
b.ForEachValidator(func(validator solid.Validator, i, total int) bool { diff --git a/cl/phase1/core/state/cache_accessors.go b/cl/phase1/core/state/cache_accessors.go index 41fea3a699a..c152954292d 100644 --- a/cl/phase1/core/state/cache_accessors.go +++ b/cl/phase1/core/state/cache_accessors.go @@ -19,6 +19,7 @@ package state import ( "crypto/sha256" "encoding/binary" + "errors" "fmt" "math" @@ -198,7 +199,7 @@ func (b *CachingBeaconState) GetAttestationParticipationFlagIndicies( } // Matching roots if !data.Source().Equal(justifiedCheckpoint) && !skipAssert { - return nil, fmt.Errorf("GetAttestationParticipationFlagIndicies: source does not match") + return nil, errors.New("GetAttestationParticipationFlagIndicies: source does not match") } targetRoot, err := GetBlockRoot(b, data.Target().Epoch()) if err != nil { @@ -344,7 +345,7 @@ func (b *CachingBeaconState) GetAttestingIndicies( bitIndex := i % 8 sliceIndex := i / 8 if sliceIndex >= len(aggregationBits) { - return nil, fmt.Errorf("GetAttestingIndicies: committee is too big") + return nil, errors.New("GetAttestingIndicies: committee is too big") } if (aggregationBits[sliceIndex] & (1 << bitIndex)) > 0 { attestingIndices = append(attestingIndices, member) diff --git a/cl/phase1/core/state/cache_mutators.go b/cl/phase1/core/state/cache_mutators.go index 70bc41fbba2..351567a4315 100644 --- a/cl/phase1/core/state/cache_mutators.go +++ b/cl/phase1/core/state/cache_mutators.go @@ -17,6 +17,7 @@ package state import ( + "errors" "fmt" "github.com/erigontech/erigon-lib/common/math" @@ -120,7 +121,7 @@ func (b *CachingBeaconState) InitiateValidatorExit(index uint64) error { var overflow bool var newWithdrawableEpoch uint64 if newWithdrawableEpoch, overflow = math.SafeAdd(exitQueueEpoch, b.BeaconConfig().MinValidatorWithdrawabilityDelay); overflow { - return fmt.Errorf("withdrawable epoch is too big") + return errors.New("withdrawable epoch is too big") } b.SetExitEpochForValidatorAtIndex(int(index), exitQueueEpoch) 
b.SetWithdrawableEpochForValidatorAtIndex(int(index), newWithdrawableEpoch) diff --git a/cl/phase1/core/state/raw/getters.go b/cl/phase1/core/state/raw/getters.go index d2d858e8a06..34df2b6a5de 100644 --- a/cl/phase1/core/state/raw/getters.go +++ b/cl/phase1/core/state/raw/getters.go @@ -331,7 +331,7 @@ func (b *BeaconState) GetBlockRootAtSlot(slot uint64) (libcommon.Hash, error) { return libcommon.Hash{}, ErrGetBlockRootAtSlotFuture } if b.Slot() > slot+b.BeaconConfig().SlotsPerHistoricalRoot { - return libcommon.Hash{}, fmt.Errorf("GetBlockRootAtSlot: slot too much far behind") + return libcommon.Hash{}, errors.New("GetBlockRootAtSlot: slot too much far behind") } return b.blockRoots.Get(int(slot % b.BeaconConfig().SlotsPerHistoricalRoot)), nil } diff --git a/cl/phase1/execution_client/execution_client_direct.go b/cl/phase1/execution_client/execution_client_direct.go index 58eab549fd0..0881d14c89e 100644 --- a/cl/phase1/execution_client/execution_client_direct.go +++ b/cl/phase1/execution_client/execution_client_direct.go @@ -19,6 +19,7 @@ package execution_client import ( "context" "encoding/binary" + "errors" "fmt" "math/big" @@ -75,13 +76,13 @@ func (cc *ExecutionClientDirect) NewPayload(ctx context.Context, payload *cltype // check status switch status { case execution.ExecutionStatus_BadBlock, execution.ExecutionStatus_InvalidForkchoice: - return PayloadStatusInvalidated, fmt.Errorf("bad block") + return PayloadStatusInvalidated, errors.New("bad block") case execution.ExecutionStatus_Busy, execution.ExecutionStatus_MissingSegment, execution.ExecutionStatus_TooFarAway: return PayloadStatusNotValidated, nil case execution.ExecutionStatus_Success: return PayloadStatusValidated, nil } - return PayloadStatusNone, fmt.Errorf("unexpected status") + return PayloadStatusNone, errors.New("unexpected status") } func (cc *ExecutionClientDirect) ForkChoiceUpdate(ctx context.Context, finalized libcommon.Hash, head libcommon.Hash, attr *engine_types.PayloadAttributes) 
([]byte, error) { @@ -90,10 +91,10 @@ func (cc *ExecutionClientDirect) ForkChoiceUpdate(ctx context.Context, finalized return nil, fmt.Errorf("execution Client RPC failed to retrieve ForkChoiceUpdate response, err: %w", err) } if status == execution.ExecutionStatus_InvalidForkchoice { - return nil, fmt.Errorf("forkchoice was invalid") + return nil, errors.New("forkchoice was invalid") } if status == execution.ExecutionStatus_BadBlock { - return nil, fmt.Errorf("bad block as forkchoice") + return nil, errors.New("bad block as forkchoice") } if attr == nil { return nil, nil diff --git a/cl/phase1/execution_client/execution_client_rpc.go b/cl/phase1/execution_client/execution_client_rpc.go index 154b3b9ce0a..4983e7704ff 100644 --- a/cl/phase1/execution_client/execution_client_rpc.go +++ b/cl/phase1/execution_client/execution_client_rpc.go @@ -18,6 +18,7 @@ package execution_client import ( "context" + "errors" "fmt" "math/big" "net/http" @@ -89,7 +90,7 @@ func (cc *ExecutionClientRpc) NewPayload(ctx context.Context, payload *cltypes.E case clparams.DenebVersion: engineMethod = rpc_helper.EngineNewPayloadV3 default: - return PayloadStatusNone, fmt.Errorf("invalid payload version") + return PayloadStatusNone, errors.New("invalid payload version") } request := engine_types.ExecutionPayload{ @@ -174,7 +175,7 @@ func (cc *ExecutionClientRpc) ForkChoiceUpdate(ctx context.Context, finalized li func checkPayloadStatus(payloadStatus *engine_types.PayloadStatus) error { if payloadStatus == nil { - return fmt.Errorf("empty payloadStatus") + return errors.New("empty payloadStatus") } validationError := payloadStatus.ValidationError diff --git a/cl/phase1/forkchoice/checkpoint_state.go b/cl/phase1/forkchoice/checkpoint_state.go index ecf9f147a61..f1c095d8bf0 100644 --- a/cl/phase1/forkchoice/checkpoint_state.go +++ b/cl/phase1/forkchoice/checkpoint_state.go @@ -17,6 +17,7 @@ package forkchoice import ( + "errors" "fmt" "github.com/erigontech/erigon/cl/cltypes/solid" @@ -135,7 
+136,7 @@ func (c *checkpointState) getAttestingIndicies(attestation *solid.AttestationDat bitIndex := i % 8 sliceIndex := i / 8 if sliceIndex >= len(aggregationBits) { - return nil, fmt.Errorf("GetAttestingIndicies: committee is too big") + return nil, errors.New("GetAttestingIndicies: committee is too big") } if (aggregationBits[sliceIndex] & (1 << bitIndex)) > 0 { attestingIndices = append(attestingIndices, member) @@ -177,7 +178,7 @@ func (c *checkpointState) getDomain(domainType [4]byte, epoch uint64) ([]byte, e func (c *checkpointState) isValidIndexedAttestation(att *cltypes.IndexedAttestation) (bool, error) { inds := att.AttestingIndices if inds.Length() == 0 || !solid.IsUint64SortedSet(inds) { - return false, fmt.Errorf("isValidIndexedAttestation: attesting indices are not sorted or are null") + return false, errors.New("isValidIndexedAttestation: attesting indices are not sorted or are null") } pks := [][]byte{} @@ -206,7 +207,7 @@ func (c *checkpointState) isValidIndexedAttestation(att *cltypes.IndexedAttestat return false, fmt.Errorf("error while validating signature: %v", err) } if !valid { - return false, fmt.Errorf("invalid aggregate signature") + return false, errors.New("invalid aggregate signature") } return true, nil } diff --git a/cl/phase1/forkchoice/get_head.go b/cl/phase1/forkchoice/get_head.go index 793aacd239b..e002098a238 100644 --- a/cl/phase1/forkchoice/get_head.go +++ b/cl/phase1/forkchoice/get_head.go @@ -18,7 +18,7 @@ package forkchoice import ( "bytes" - "fmt" + "errors" "sort" libcommon "github.com/erigontech/erigon-lib/common" @@ -104,7 +104,7 @@ func (f *ForkChoiceStore) GetHead() (libcommon.Hash, uint64, error) { if len(children) == 0 { header, hasHeader := f.forkGraph.GetHeader(f.headHash) if !hasHeader { - return libcommon.Hash{}, 0, fmt.Errorf("no slot for head is stored") + return libcommon.Hash{}, 0, errors.New("no slot for head is stored") } f.headSlot = header.Slot return f.headHash, f.headSlot, nil diff --git 
a/cl/phase1/forkchoice/on_attestation.go b/cl/phase1/forkchoice/on_attestation.go index 036c5445bb3..d959200b7d3 100644 --- a/cl/phase1/forkchoice/on_attestation.go +++ b/cl/phase1/forkchoice/on_attestation.go @@ -17,7 +17,7 @@ package forkchoice import ( - "fmt" + "errors" "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/phase1/core/state" @@ -26,7 +26,7 @@ import ( ) var ( - ErrIgnore = fmt.Errorf("ignore") + ErrIgnore = errors.New("ignore") ) // OnAttestation processes incoming attestations. @@ -101,7 +101,7 @@ func (f *ForkChoiceStore) verifyAttestationWithCheckpointState( } // Verify attestation signature. if targetState == nil { - return nil, fmt.Errorf("target state does not exist") + return nil, errors.New("target state does not exist") } // Now we need to find the attesting indicies. attestationIndicies, err = targetState.getAttestingIndicies( @@ -122,7 +122,7 @@ func (f *ForkChoiceStore) verifyAttestationWithCheckpointState( return nil, err } if !valid { - return nil, fmt.Errorf("invalid attestation") + return nil, errors.New("invalid attestation") } } return attestationIndicies, nil @@ -152,7 +152,7 @@ func (f *ForkChoiceStore) verifyAttestationWithState( return nil, err } if !valid { - return nil, fmt.Errorf("invalid attestation") + return nil, errors.New("invalid attestation") } } return attestationIndicies, nil @@ -227,23 +227,23 @@ func (f *ForkChoiceStore) ValidateOnAttestation(attestation *solid.Attestation) target := attestation.AttestantionData().Target() if target.Epoch() != f.computeEpochAtSlot(attestation.AttestantionData().Slot()) { - return fmt.Errorf("mismatching target epoch with slot data") + return errors.New("mismatching target epoch with slot data") } if _, has := f.forkGraph.GetHeader(target.BlockRoot()); !has { - return fmt.Errorf("target root is missing") + return errors.New("target root is missing") } if blockHeader, has := 
f.forkGraph.GetHeader(attestation.AttestantionData().BeaconBlockRoot()); !has || blockHeader.Slot > attestation.AttestantionData().Slot() { - return fmt.Errorf("bad attestation data") + return errors.New("bad attestation data") } // LMD vote must be consistent with FFG vote target targetSlot := f.computeStartSlotAtEpoch(target.Epoch()) ancestorRoot := f.Ancestor(attestation.AttestantionData().BeaconBlockRoot(), targetSlot) if ancestorRoot == (libcommon.Hash{}) { - return fmt.Errorf("could not retrieve ancestor") + return errors.New("could not retrieve ancestor") } if ancestorRoot != target.BlockRoot() { - return fmt.Errorf("ancestor root mismatches with target") + return errors.New("ancestor root mismatches with target") } return nil @@ -263,5 +263,5 @@ func (f *ForkChoiceStore) validateTargetEpochAgainstCurrentTime( if target.Epoch() == currentEpoch || target.Epoch() == previousEpoch { return nil } - return fmt.Errorf("verification of attestation against current time failed") + return errors.New("verification of attestation against current time failed") } diff --git a/cl/phase1/forkchoice/on_attester_slashing.go b/cl/phase1/forkchoice/on_attester_slashing.go index 5eb499aeee5..82a903a8417 100644 --- a/cl/phase1/forkchoice/on_attester_slashing.go +++ b/cl/phase1/forkchoice/on_attester_slashing.go @@ -17,6 +17,7 @@ package forkchoice import ( + "errors" "fmt" "github.com/Giulio2002/bls" @@ -38,7 +39,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS attestation1 := attesterSlashing.Attestation_1 attestation2 := attesterSlashing.Attestation_2 if !cltypes.IsSlashableAttestationData(attestation1.Data, attestation2.Data) { - return fmt.Errorf("attestation data is not slashable") + return errors.New("attestation data is not slashable") } var err error s := f.syncedDataManager.HeadState() @@ -50,7 +51,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS } } if s == nil { - return fmt.Errorf("no state 
accessible") + return errors.New("no state accessible") } attestation1PublicKeys, err := getIndexedAttestationPublicKeys(s, attestation1) if err != nil { @@ -81,7 +82,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS return fmt.Errorf("error while validating signature: %v", err) } if !valid { - return fmt.Errorf("invalid aggregate signature") + return errors.New("invalid aggregate signature") } // Verify validity of slashings (2) signingRoot, err = fork.ComputeSigningRoot(attestation2.Data, domain2) @@ -94,7 +95,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS return fmt.Errorf("error while validating signature: %v", err) } if !valid { - return fmt.Errorf("invalid aggregate signature") + return errors.New("invalid aggregate signature") } } @@ -120,7 +121,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS func getIndexedAttestationPublicKeys(b *state.CachingBeaconState, att *cltypes.IndexedAttestation) ([][]byte, error) { inds := att.AttestingIndices if inds.Length() == 0 || !solid.IsUint64SortedSet(inds) { - return nil, fmt.Errorf("isValidIndexedAttestation: attesting indices are not sorted or are null") + return nil, errors.New("isValidIndexedAttestation: attesting indices are not sorted or are null") } pks := make([][]byte, 0, inds.Length()) if err := solid.RangeErr[uint64](inds, func(_ int, v uint64, _ int) error { diff --git a/cl/phase1/forkchoice/on_block.go b/cl/phase1/forkchoice/on_block.go index b845e8c5522..881f1d5f000 100644 --- a/cl/phase1/forkchoice/on_block.go +++ b/cl/phase1/forkchoice/on_block.go @@ -18,6 +18,7 @@ package forkchoice import ( "context" + "errors" "fmt" "sort" "time" @@ -41,7 +42,7 @@ import ( const foreseenProposers = 16 -var ErrEIP4844DataNotAvailable = fmt.Errorf("EIP-4844 blob data is not available") +var ErrEIP4844DataNotAvailable = errors.New("EIP-4844 blob data is not available") func 
verifyKzgCommitmentsAgainstTransactions(cfg *clparams.BeaconChainConfig, block *cltypes.Eth1Block, kzgCommitments *solid.ListSSZ[*cltypes.KZGCommitment]) error { expectedBlobHashes := []common.Hash{} @@ -75,7 +76,7 @@ func (f *ForkChoiceStore) OnBlock(ctx context.Context, block *cltypes.SignedBeac return err } if f.Slot() < block.Block.Slot { - return fmt.Errorf("block is too early compared to current_slot") + return errors.New("block is too early compared to current_slot") } // Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) finalizedSlot := f.computeStartSlotAtEpoch(f.finalizedCheckpoint.Load().(solid.Checkpoint).Epoch()) @@ -129,7 +130,7 @@ func (f *ForkChoiceStore) OnBlock(ctx context.Context, block *cltypes.SignedBeac if err := f.optimisticStore.InvalidateBlock(block.Block); err != nil { return fmt.Errorf("failed to remove block from optimistic store: %v", err) } - return fmt.Errorf("block is invalid") + return errors.New("block is invalid") case execution_client.PayloadStatusValidated: log.Trace("OnBlock: block is validated", "block", libcommon.Hash(blockRoot)) // remove from optimistic candidate diff --git a/cl/phase1/forkchoice/utils.go b/cl/phase1/forkchoice/utils.go index 468d30b3df2..5bd7eca9e09 100644 --- a/cl/phase1/forkchoice/utils.go +++ b/cl/phase1/forkchoice/utils.go @@ -17,7 +17,7 @@ package forkchoice import ( - "fmt" + "errors" "github.com/erigontech/erigon/cl/transition" @@ -124,7 +124,7 @@ func (f *ForkChoiceStore) getCheckpointState(checkpoint solid.Checkpoint) (*chec return nil, err } if baseState == nil { - return nil, fmt.Errorf("getCheckpointState: baseState not found in graph") + return nil, errors.New("getCheckpointState: baseState not found in graph") } // By default use the no change encoding to signal that there is no future epoch here. 
if baseState.Slot() < f.computeStartSlotAtEpoch(checkpoint.Epoch()) { diff --git a/cl/phase1/network/services/aggregate_and_proof_service.go b/cl/phase1/network/services/aggregate_and_proof_service.go index adaa543fbe4..74dde2aeee8 100644 --- a/cl/phase1/network/services/aggregate_and_proof_service.go +++ b/cl/phase1/network/services/aggregate_and_proof_service.go @@ -18,7 +18,7 @@ package services import ( "context" - "fmt" + "errors" "slices" "sync" "time" @@ -115,11 +115,11 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( // [REJECT] The committee index is within the expected range -- i.e. index < get_committee_count_per_slot(state, aggregate.data.target.epoch). committeeCountPerSlot := headState.CommitteeCount(target.Epoch()) if aggregateData.CommitteeIndex() >= committeeCountPerSlot { - return fmt.Errorf("invalid committee index in aggregate and proof") + return errors.New("invalid committee index in aggregate and proof") } // [REJECT] The aggregate attestation's epoch matches its target -- i.e. aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot) if aggregateData.Target().Epoch() != epoch { - return fmt.Errorf("invalid target epoch in aggregate and proof") + return errors.New("invalid target epoch in aggregate and proof") } committee, err := headState.GetBeaconCommitee(slot, committeeIndex) if err != nil { @@ -128,14 +128,14 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( // [REJECT] The aggregator's validator index is within the committee -- i.e. aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, index). if !slices.Contains(committee, aggregateAndProof.Message.AggregatorIndex) { - return fmt.Errorf("committee index not in committee") + return errors.New("committee index not in committee") } // [REJECT] The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e. 
get_checkpoint_block(store, aggregate.data.beacon_block_root, aggregate.data.target.epoch) == aggregate.data.target.root if a.forkchoiceStore.Ancestor( aggregateData.BeaconBlockRoot(), epoch*a.beaconCfg.SlotsPerEpoch, ) != target.BlockRoot() { - return fmt.Errorf("invalid target block") + return errors.New("invalid target block") } if a.test { return nil @@ -144,7 +144,7 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( // [REJECT] aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot -- i.e. is_aggregator(state, aggregate.data.slot, index, aggregate_and_proof.selection_proof) returns True. if !state.IsAggregator(a.beaconCfg, uint64(len(committee)), committeeIndex, selectionProof) { log.Warn("receveived aggregate and proof from invalid aggregator") - return fmt.Errorf("invalid aggregate and proof") + return errors.New("invalid aggregate and proof") } attestingIndicies, err := headState.GetAttestingIndicies( aggregateAndProof.Message.Aggregate.AttestantionData(), @@ -183,7 +183,7 @@ func verifySignaturesOnAggregate( return err } if len(attestingIndicies) == 0 { - return fmt.Errorf("no attesting indicies") + return errors.New("no attesting indicies") } // [REJECT] The aggregate_and_proof.selection_proof is a valid signature of the aggregate.data.slot by the validator with index aggregate_and_proof.aggregator_index. 
if err := verifyAggregateAndProofSignature(s, aggregateAndProof.Message); err != nil { @@ -220,7 +220,7 @@ func verifyAggregateAndProofSignature( return err } if !valid { - return fmt.Errorf("invalid bls signature on aggregate and proof") + return errors.New("invalid bls signature on aggregate and proof") } return nil } @@ -246,7 +246,7 @@ func verifyAggregatorSignature( return err } if !valid { - return fmt.Errorf("invalid bls signature on aggregate and proof") + return errors.New("invalid bls signature on aggregate and proof") } return nil } @@ -266,7 +266,7 @@ func verifyAggregateMessageSignature( return err } if !valid { - return fmt.Errorf("invalid aggregate signature") + return errors.New("invalid aggregate signature") } return nil } diff --git a/cl/phase1/network/services/attestation_service.go b/cl/phase1/network/services/attestation_service.go index ad673507c51..6f30cc0dd74 100644 --- a/cl/phase1/network/services/attestation_service.go +++ b/cl/phase1/network/services/attestation_service.go @@ -101,7 +101,7 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, // [REJECT] The attestation is for the correct subnet -- i.e. compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, index) == subnet_id subnetId := computeSubnetForAttestation(committeeCount, slot, committeeIndex, s.beaconCfg.SlotsPerEpoch, s.netCfg.AttestationSubnetCount) if subnet == nil || subnetId != *subnet { - return fmt.Errorf("wrong subnet") + return errors.New("wrong subnet") } // [IGNORE] attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- // i.e. attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot (a client MAY queue future attestations for processing at the appropriate slot). 
@@ -111,7 +111,7 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, } // [REJECT] The attestation's epoch matches its target -- i.e. attestation.data.target.epoch == compute_epoch_at_slot(attestation.data.slot) if targetEpoch != slot/s.beaconCfg.SlotsPerEpoch { - return fmt.Errorf("epoch mismatch") + return errors.New("epoch mismatch") } // [REJECT] The number of aggregation bits matches the committee size -- i.e. len(aggregation_bits) == len(get_beacon_committee(state, attestation.data.slot, index)). beaconCommittee, err := s.forkchoiceStore.GetBeaconCommitee(slot, committeeIndex) @@ -144,14 +144,14 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, return ErrIgnore // Ignore if it is just an empty bitlist } if setBits != 1 { - return fmt.Errorf("attestation does not have exactly one participating validator") + return errors.New("attestation does not have exactly one participating validator") } // [IGNORE] There has been no other valid attestation seen on an attestation subnet that has an identical attestation.data.target.epoch and participating validator index. 
if err != nil { return err } if onBitIndex >= len(beaconCommittee) { - return fmt.Errorf("on bit index out of committee range") + return errors.New("on bit index out of committee range") } // mark the validator as seen vIndex := beaconCommittee[onBitIndex] @@ -179,7 +179,7 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, return err } else if !valid { log.Warn("lodestar: invalid signature", "signature", common.Bytes2Hex(signature[:]), "signningRoot", common.Bytes2Hex(signingRoot[:]), "pubKey", common.Bytes2Hex(pubKey[:])) - return fmt.Errorf("invalid signature") + return errors.New("invalid signature") } // [IGNORE] The block being voted for (attestation.data.beacon_block_root) has been seen (via both gossip and non-gossip sources) @@ -193,7 +193,7 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, // get_checkpoint_block(store, attestation.data.beacon_block_root, attestation.data.target.epoch) == attestation.data.target.root startSlotAtEpoch := targetEpoch * s.beaconCfg.SlotsPerEpoch if s.forkchoiceStore.Ancestor(root, startSlotAtEpoch) != att.AttestantionData().Target().BlockRoot() { - return fmt.Errorf("invalid target block") + return errors.New("invalid target block") } // [IGNORE] The current finalized_checkpoint is an ancestor of the block defined by attestation.data.beacon_block_root -- // i.e. 
get_checkpoint_block(store, attestation.data.beacon_block_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root diff --git a/cl/phase1/network/services/blob_sidecar_service.go b/cl/phase1/network/services/blob_sidecar_service.go index 56f14a434a8..f1e38addcf0 100644 --- a/cl/phase1/network/services/blob_sidecar_service.go +++ b/cl/phase1/network/services/blob_sidecar_service.go @@ -18,6 +18,7 @@ package services import ( "context" + "errors" "fmt" "sync" "time" @@ -86,7 +87,7 @@ func (b *blobSidecarService) ProcessMessage(ctx context.Context, subnetId *uint6 // [REJECT] The sidecar's index is consistent with MAX_BLOBS_PER_BLOCK -- i.e. blob_sidecar.index < MAX_BLOBS_PER_BLOCK. if msg.Index >= b.beaconCfg.MaxBlobsPerBlock { - return fmt.Errorf("blob index out of range") + return errors.New("blob index out of range") } sidecarSubnetIndex := msg.Index % b.beaconCfg.MaxBlobsPerBlock if sidecarSubnetIndex != *subnetId { @@ -148,7 +149,7 @@ func (b *blobSidecarService) verifyAndStoreBlobSidecar(headState *state.CachingB func (b *blobSidecarService) verifySidecarsSignature(headState *state.CachingBeaconState, header *cltypes.SignedBeaconBlockHeader) error { parentHeader, ok := b.forkchoiceStore.GetHeader(header.Header.ParentRoot) if !ok { - return fmt.Errorf("parent header not found") + return errors.New("parent header not found") } currentVersion := b.beaconCfg.GetCurrentStateVersion(parentHeader.Slot / b.beaconCfg.SlotsPerEpoch) forkVersion := b.beaconCfg.GetForkVersionByVersion(currentVersion) @@ -168,7 +169,7 @@ func (b *blobSidecarService) verifySidecarsSignature(headState *state.CachingBea return err } if !ok { - return fmt.Errorf("blob signature validation: signature not valid") + return errors.New("blob signature validation: signature not valid") } return nil } diff --git a/cl/phase1/network/services/bls_to_execution_change_service.go b/cl/phase1/network/services/bls_to_execution_change_service.go index b6170f11fd8..870bf1f0008 100644 --- 
a/cl/phase1/network/services/bls_to_execution_change_service.go +++ b/cl/phase1/network/services/bls_to_execution_change_service.go @@ -19,6 +19,7 @@ package services import ( "bytes" "context" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -84,7 +85,7 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet // assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX if wc[0] != byte(s.beaconCfg.BLSWithdrawalPrefixByte) { - return fmt.Errorf("invalid withdrawal credentials prefix") + return errors.New("invalid withdrawal credentials prefix") } // assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:] @@ -92,7 +93,7 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet // Check the validator's withdrawal credentials against the provided message. hashedFrom := utils.Sha256(change.From[:]) if !bytes.Equal(hashedFrom[1:], wc[1:]) { - return fmt.Errorf("invalid withdrawal credentials hash") + return errors.New("invalid withdrawal credentials hash") } // assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) @@ -110,7 +111,7 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet return err } if !valid { - return fmt.Errorf("invalid signature") + return errors.New("invalid signature") } // validator.withdrawal_credentials = ( diff --git a/cl/phase1/network/services/proposer_slashing_service.go b/cl/phase1/network/services/proposer_slashing_service.go index 3aa5fc1ed45..7725a222e84 100644 --- a/cl/phase1/network/services/proposer_slashing_service.go +++ b/cl/phase1/network/services/proposer_slashing_service.go @@ -18,6 +18,7 @@ package services import ( "context" + "errors" "fmt" "github.com/erigontech/erigon/cl/beacon/synced_data" @@ -83,7 +84,7 @@ func (s *proposerSlashingService) ProcessMessage(ctx context.Context, subnet *ui // Verify the headers are different if *h1 
== *h2 { - return fmt.Errorf("proposee slashing headers are the same") + return errors.New("proposer slashing headers are the same") } // Verify the proposer is slashable diff --git a/cl/phase1/network/services/sync_contribution_service.go b/cl/phase1/network/services/sync_contribution_service.go index ebf28547a0c..4f2cce13124 100644 --- a/cl/phase1/network/services/sync_contribution_service.go +++ b/cl/phase1/network/services/sync_contribution_service.go @@ -21,7 +21,6 @@ import ( "context" "encoding/binary" "errors" - "fmt" "slices" "sync" @@ -95,7 +94,7 @@ func (s *syncContributionService) ProcessMessage(ctx context.Context, subnet *ui // [REJECT] The subcommittee index is in the allowed range, i.e. contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT. if contributionAndProof.Contribution.SubcommitteeIndex >= clparams.MainnetBeaconConfig.SyncCommitteeSubnetCount { - return fmt.Errorf("subcommittee index is out of range") + return errors.New("subcommittee index is out of range") } aggregatorPubKey, err := headState.ValidatorPublicKey(int(contributionAndProof.AggregatorIndex)) @@ -114,18 +113,18 @@ func (s *syncContributionService) ProcessMessage(ctx context.Context, subnet *ui // [REJECT] The contribution has participants -- that is, any(contribution.aggregation_bits). 
if bytes.Equal(aggregationBits, make([]byte, len(aggregationBits))) { // check if the aggregation bits are all zeros - return fmt.Errorf("contribution has no participants") + return errors.New("contribution has no participants") } modulo := max(1, s.beaconCfg.SyncCommitteeSize/s.beaconCfg.SyncCommitteeSubnetCount/s.beaconCfg.TargetAggregatorsPerSyncSubcommittee) hashSignature := utils.Sha256(selectionProof[:]) if !s.test && binary.LittleEndian.Uint64(hashSignature[:8])%modulo != 0 { - return fmt.Errorf("selects the validator as an aggregator") + return errors.New("selects the validator as an aggregator") } // [REJECT] The aggregator's validator index is in the declared subcommittee of the current sync committee -- i.e. state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index). if !slices.Contains(subcommiteePubsKeys, aggregatorPubKey) { - return fmt.Errorf("aggregator's validator index is not in subcommittee") + return errors.New("aggregator's validator index is not in subcommittee") } // [IGNORE] The sync committee contribution is the first valid contribution received for the aggregator with index contribution_and_proof.aggregator_index for the slot contribution.slot and subcommittee index contribution.subcommittee_index (this requires maintaining a cache of size SYNC_COMMITTEE_SIZE for this topic that can be flushed after each slot). 
@@ -236,7 +235,7 @@ func verifySyncContributionSelectionProof(st *state.CachingBeaconState, contribu return err } if !valid { - return fmt.Errorf("invalid selectionProof signature") + return errors.New("invalid selectionProof signature") } return nil } @@ -266,7 +265,7 @@ func verifySyncContributionProofAggregatedSignature(s *state.CachingBeaconState, } if !valid { - return fmt.Errorf("invalid signature for aggregate sync contribution") + return errors.New("invalid signature for aggregate sync contribution") } return nil } diff --git a/cl/phase1/network/services/voluntary_exit_service.go b/cl/phase1/network/services/voluntary_exit_service.go index c425f678cd1..a748a57f003 100644 --- a/cl/phase1/network/services/voluntary_exit_service.go +++ b/cl/phase1/network/services/voluntary_exit_service.go @@ -80,7 +80,7 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 // Verify the validator is active // assert is_active_validator(validator, get_current_epoch(state)) if !val.Active(curEpoch) { - return fmt.Errorf("validator is not active") + return errors.New("validator is not active") } // Verify exit has not been initiated @@ -92,13 +92,13 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 // Exits must specify an epoch when they become valid; they are not valid before then // assert get_current_epoch(state) >= voluntary_exit.epoch if !(curEpoch >= voluntaryExit.Epoch) { - return fmt.Errorf("exits must specify an epoch when they become valid; they are not valid before then") + return errors.New("exits must specify an epoch when they become valid; they are not valid before then") } // Verify the validator has been active long enough // assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD if !(curEpoch >= val.ActivationEpoch()+s.beaconCfg.ShardCommitteePeriod) { - return fmt.Errorf("verify the validator has been active long enough") + return errors.New("verify the validator 
has been active long enough") } // Verify signature diff --git a/cl/phase1/stages/stage_history_download.go b/cl/phase1/stages/stage_history_download.go index c19ebad6f42..835a1f7cb6c 100644 --- a/cl/phase1/stages/stage_history_download.go +++ b/cl/phase1/stages/stage_history_download.go @@ -18,6 +18,7 @@ package stages import ( "context" + "errors" "fmt" "math" "sync/atomic" @@ -364,11 +365,11 @@ func downloadBlobHistoryWorker(cfg StageHistoryReconstructionCfg, ctx context.Co continue } if block.Signature != header.Signature { - return fmt.Errorf("signature mismatch beetwen blob and stored block") + return errors.New("signature mismatch beetwen blob and stored block") } return nil } - return fmt.Errorf("block not in batch") + return errors.New("block not in batch") }) if err != nil { rpc.BanPeer(blobs.Peer) diff --git a/cl/rpc/rpc.go b/cl/rpc/rpc.go index a72d8f5ecdf..22e29ab2c46 100644 --- a/cl/rpc/rpc.go +++ b/cl/rpc/rpc.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "io" "time" @@ -105,7 +106,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(ctx context.Context, topic string, reqD } // Sanity check for message size. if encodedLn > uint64(maxMessageLength) { - return nil, message.Peer.Pid, fmt.Errorf("received message too big") + return nil, message.Peer.Pid, errors.New("received message too big") } // Read bytes using snappy into a new raw buffer of side encodedLn. @@ -122,7 +123,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(ctx context.Context, topic string, reqD // Fork digests respForkDigest := binary.BigEndian.Uint32(forkDigest) if respForkDigest == 0 { - return nil, message.Peer.Pid, fmt.Errorf("null fork digest") + return nil, message.Peer.Pid, errors.New("null fork digest") } version, err := b.ethClock.StateVersionByForkDigest(utils.Uint32ToBytes4(respForkDigest)) @@ -179,7 +180,7 @@ func (b *BeaconRpcP2P) sendBlobsSidecar(ctx context.Context, topic string, reqDa } // Sanity check for message size. 
if encodedLn > uint64(maxMessageLength) { - return nil, message.Peer.Pid, fmt.Errorf("received message too big") + return nil, message.Peer.Pid, errors.New("received message too big") } // Read bytes using snappy into a new raw buffer of side encodedLn. @@ -196,7 +197,7 @@ func (b *BeaconRpcP2P) sendBlobsSidecar(ctx context.Context, topic string, reqDa // Fork digests respForkDigest := binary.BigEndian.Uint32(forkDigest) if respForkDigest == 0 { - return nil, message.Peer.Pid, fmt.Errorf("null fork digest") + return nil, message.Peer.Pid, errors.New("null fork digest") } version, err := b.ethClock.StateVersionByForkDigest(utils.Uint32ToBytes4(respForkDigest)) diff --git a/cl/sentinel/communication/ssz_snappy/encoding.go b/cl/sentinel/communication/ssz_snappy/encoding.go index 5d841614f4f..54b587d47f3 100644 --- a/cl/sentinel/communication/ssz_snappy/encoding.go +++ b/cl/sentinel/communication/ssz_snappy/encoding.go @@ -20,6 +20,7 @@ import ( "bufio" "bytes" "encoding/binary" + "errors" "fmt" "io" "sync" @@ -88,7 +89,7 @@ func DecodeAndReadNoForkDigest(r io.Reader, val ssz.EncodableSSZ, version clpara return fmt.Errorf("unable to read varint from message prefix: %v", err) } if encodedLn > uint64(16*datasize.MB) { - return fmt.Errorf("payload too big") + return errors.New("payload too big") } sr := snappy.NewReader(r) diff --git a/cl/sentinel/discovery.go b/cl/sentinel/discovery.go index 53448906d0f..02a1fff5db3 100644 --- a/cl/sentinel/discovery.go +++ b/cl/sentinel/discovery.go @@ -18,7 +18,7 @@ package sentinel import ( "context" - "fmt" + "errors" "time" "github.com/libp2p/go-libp2p/core/network" @@ -42,7 +42,7 @@ func (s *Sentinel) ConnectWithPeer(ctx context.Context, info peer.AddrInfo) (err return nil } if s.peers.BanStatus(info.ID) { - return fmt.Errorf("refused to connect to bad peer") + return errors.New("refused to connect to bad peer") } ctxWithTimeout, cancel := context.WithTimeout(ctx, clparams.MaxDialTimeout) defer cancel() diff --git 
a/cl/sentinel/handlers/blobs_test.go b/cl/sentinel/handlers/blobs_test.go index 6465776cd69..12fee5be072 100644 --- a/cl/sentinel/handlers/blobs_test.go +++ b/cl/sentinel/handlers/blobs_test.go @@ -20,7 +20,7 @@ import ( "bytes" "context" "encoding/binary" - "fmt" + "errors" "io" "math" "testing" @@ -166,7 +166,7 @@ func TestBlobsByRangeHandler(t *testing.T) { // Fork digests respForkDigest := binary.BigEndian.Uint32(forkDigest) if respForkDigest == 0 { - require.NoError(t, fmt.Errorf("null fork digest")) + require.NoError(t, errors.New("null fork digest")) } version, err := ethClock.StateVersionByForkDigest(utils.Uint32ToBytes4(respForkDigest)) if err != nil { @@ -288,7 +288,7 @@ func TestBlobsByIdentifiersHandler(t *testing.T) { // Fork digests respForkDigest := binary.BigEndian.Uint32(forkDigest) if respForkDigest == 0 { - require.NoError(t, fmt.Errorf("null fork digest")) + require.NoError(t, errors.New("null fork digest")) } version, err := ethClock.StateVersionByForkDigest(utils.Uint32ToBytes4(respForkDigest)) if err != nil { diff --git a/cl/sentinel/peers/pool.go b/cl/sentinel/peers/pool.go index ad969212ad6..cfa9f9d8a36 100644 --- a/cl/sentinel/peers/pool.go +++ b/cl/sentinel/peers/pool.go @@ -17,7 +17,7 @@ package peers import ( - "fmt" + "errors" "sync" "sync/atomic" @@ -151,7 +151,7 @@ func (p *Pool) Request() (pid *Item, done func(), err error) { //grab a peer from our ringbuffer val, ok := p.queue.PopFront() if !ok { - return nil, nil, fmt.Errorf("no peers? ( :( > ") + return nil, nil, errors.New("no peers? 
( :( > ") } return val, func() { p.mu.Lock() diff --git a/cl/sentinel/service/notifiers.go b/cl/sentinel/service/notifiers.go index 9a2c0a34d9a..5c35cb51693 100644 --- a/cl/sentinel/service/notifiers.go +++ b/cl/sentinel/service/notifiers.go @@ -17,7 +17,7 @@ package service import ( - "fmt" + "errors" "sync" ) @@ -58,7 +58,7 @@ func (g *gossipNotifier) addSubscriber() (chan gossipObject, int, error) { defer g.mu.Unlock() if len(g.notifiers) >= maxSubscribers { - return nil, -1, fmt.Errorf("too many subsribers, try again later") + return nil, -1, errors.New("too many subsribers, try again later") } ch := make(chan gossipObject, 1<<16) g.notifiers = append(g.notifiers, ch) @@ -70,7 +70,7 @@ func (g *gossipNotifier) removeSubscriber(id int) error { defer g.mu.Unlock() if len(g.notifiers) <= id { - return fmt.Errorf("invalid id, no subscription exist with this id") + return errors.New("invalid id, no subscription exist with this id") } close(g.notifiers[id]) g.notifiers = append(g.notifiers[:id], g.notifiers[id+1:]...) 
diff --git a/cl/sentinel/service/service.go b/cl/sentinel/service/service.go index 2084c1f8b11..798e31ca33a 100644 --- a/cl/sentinel/service/service.go +++ b/cl/sentinel/service/service.go @@ -19,6 +19,7 @@ package service import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -121,17 +122,17 @@ func (s *SentinelServer) PublishGossip(_ context.Context, msg *sentinelrpc.Gossi switch { case gossip.IsTopicBlobSidecar(msg.Name): if msg.SubnetId == nil { - return nil, fmt.Errorf("subnetId is required for blob sidecar") + return nil, errors.New("subnetId is required for blob sidecar") } subscription = manager.GetMatchingSubscription(gossip.TopicNameBlobSidecar(*msg.SubnetId)) case gossip.IsTopicSyncCommittee(msg.Name): if msg.SubnetId == nil { - return nil, fmt.Errorf("subnetId is required for sync_committee") + return nil, errors.New("subnetId is required for sync_committee") } subscription = manager.GetMatchingSubscription(gossip.TopicNameSyncCommittee(int(*msg.SubnetId))) case gossip.IsTopicBeaconAttestation(msg.Name): if msg.SubnetId == nil { - return nil, fmt.Errorf("subnetId is required for beacon attestation") + return nil, errors.New("subnetId is required for beacon attestation") } subscription = manager.GetMatchingSubscription(gossip.TopicNameBeaconAttestation(*msg.SubnetId)) default: @@ -368,7 +369,7 @@ func (s *SentinelServer) SetSubscribeExpiry(ctx context.Context, expiryReq *sent ) subs := s.sentinel.GossipManager().GetMatchingSubscription(topic) if subs == nil { - return nil, fmt.Errorf("no such subscription") + return nil, errors.New("no such subscription") } subs.OverwriteSubscriptionExpiry(expiryTime) return &sentinelrpc.EmptyMessage{}, nil diff --git a/cl/sentinel/utils.go b/cl/sentinel/utils.go index f108c91d257..4758815bd0d 100644 --- a/cl/sentinel/utils.go +++ b/cl/sentinel/utils.go @@ -18,6 +18,7 @@ package sentinel import ( "crypto/ecdsa" + "errors" "fmt" "net" "strings" @@ -41,11 +42,11 @@ func convertToInterfacePubkey(pubkey 
*ecdsa.PublicKey) (crypto.PubKey, error) { xVal, yVal := new(btcec.FieldVal), new(btcec.FieldVal) overflows := xVal.SetByteSlice(pubkey.X.Bytes()) if overflows { - return nil, fmt.Errorf("x value overflows") + return nil, errors.New("x value overflows") } overflows = yVal.SetByteSlice(pubkey.Y.Bytes()) if overflows { - return nil, fmt.Errorf("y value overflows") + return nil, errors.New("y value overflows") } newKey := crypto.PubKey((*crypto.Secp256k1PublicKey)(btcec.NewPublicKey(xVal, yVal))) // Zero out temporary values. @@ -85,7 +86,7 @@ func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) ( return nil, fmt.Errorf("invalid ip address provided: %s", ipAddr) } if id.String() == "" { - return nil, fmt.Errorf("empty peer id given") + return nil, errors.New("empty peer id given") } if parsedIP.To4() != nil { return multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String())) diff --git a/cl/spectest/consensus_tests/operations.go b/cl/spectest/consensus_tests/operations.go index af719ecee74..a00bdd0179d 100644 --- a/cl/spectest/consensus_tests/operations.go +++ b/cl/spectest/consensus_tests/operations.go @@ -17,7 +17,7 @@ package consensus_tests import ( - "fmt" + "errors" "io/fs" "os" "testing" @@ -63,7 +63,7 @@ func operationAttestationHandler(t *testing.T, root fs.FS, c spectest.TestCase) return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -93,7 +93,7 @@ func operationAttesterSlashingHandler(t *testing.T, root fs.FS, c spectest.TestC return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -123,7 +123,7 @@ func operationProposerSlashingHandler(t *testing.T, root fs.FS, c spectest.TestC return err } if expectedError { - return fmt.Errorf("expected error") + return 
errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -155,7 +155,7 @@ func operationBlockHeaderHandler(t *testing.T, root fs.FS, c spectest.TestCase) return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -185,7 +185,7 @@ func operationDepositHandler(t *testing.T, root fs.FS, c spectest.TestCase) erro return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -215,7 +215,7 @@ func operationSyncAggregateHandler(t *testing.T, root fs.FS, c spectest.TestCase return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -245,7 +245,7 @@ func operationVoluntaryExitHandler(t *testing.T, root fs.FS, c spectest.TestCase return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -275,7 +275,7 @@ func operationWithdrawalHandler(t *testing.T, root fs.FS, c spectest.TestCase) e return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -305,7 +305,7 @@ func operationSignedBlsChangeHandler(t *testing.T, root fs.FS, c spectest.TestCa return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) diff --git a/cl/transition/impl/eth2/operations.go b/cl/transition/impl/eth2/operations.go index 3d9dab984f5..a0328bfda6d 100644 --- a/cl/transition/impl/eth2/operations.go +++ b/cl/transition/impl/eth2/operations.go @@ -63,7 +63,7 @@ func (I *impl) ProcessProposerSlashing( } if *h1 
== *h2 { - return fmt.Errorf("proposee slashing headers are the same") + return errors.New("proposee slashing headers are the same") } proposer, err := s.ValidatorForValidatorIndex(int(h1.ProposerIndex)) @@ -125,7 +125,7 @@ func (I *impl) ProcessAttesterSlashing( return fmt.Errorf("error calculating indexed attestation 1 validity: %v", err) } if !valid { - return fmt.Errorf("invalid indexed attestation 1") + return errors.New("invalid indexed attestation 1") } valid, err = state.IsValidIndexedAttestation(s, att2) @@ -133,7 +133,7 @@ func (I *impl) ProcessAttesterSlashing( return fmt.Errorf("error calculating indexed attestation 2 validity: %v", err) } if !valid { - return fmt.Errorf("invalid indexed attestation 2") + return errors.New("invalid indexed attestation 2") } slashedAny := false @@ -158,7 +158,7 @@ func (I *impl) ProcessAttesterSlashing( } if !slashedAny { - return fmt.Errorf("no validators slashed") + return errors.New("no validators slashed") } return nil } @@ -186,7 +186,7 @@ func (I *impl) ProcessDeposit(s abstract.BeaconState, deposit *cltypes.Deposit) depositIndex, eth1Data.Root, ) { - return fmt.Errorf("processDepositForAltair: Could not validate deposit root") + return errors.New("processDepositForAltair: Could not validate deposit root") } // Increment index @@ -360,7 +360,7 @@ func (I *impl) ProcessWithdrawals( func (I *impl) ProcessExecutionPayload(s abstract.BeaconState, parentHash, prevRandao common.Hash, time uint64, payloadHeader *cltypes.Eth1Header) error { if state.IsMergeTransitionComplete(s) { if parentHash != s.LatestExecutionPayloadHeader().BlockHash { - return fmt.Errorf("ProcessExecutionPayload: invalid eth1 chain. mismatching parent") + return errors.New("ProcessExecutionPayload: invalid eth1 chain. 
mismatching parent") } } if prevRandao != s.GetRandaoMixes(state.Epoch(s)) { @@ -371,7 +371,7 @@ func (I *impl) ProcessExecutionPayload(s abstract.BeaconState, parentHash, prevR ) } if time != state.ComputeTimestampAtSlot(s, s.Slot()) { - return fmt.Errorf("ProcessExecutionPayload: invalid Eth1 timestamp") + return errors.New("ProcessExecutionPayload: invalid Eth1 timestamp") } s.SetLatestExecutionPayloadHeader(payloadHeader) return nil @@ -490,13 +490,13 @@ func (I *impl) ProcessBlsToExecutionChange( if I.FullValidation { // Check the validator's withdrawal credentials prefix. if wc[0] != byte(beaconConfig.BLSWithdrawalPrefixByte) { - return fmt.Errorf("invalid withdrawal credentials prefix") + return errors.New("invalid withdrawal credentials prefix") } // Check the validator's withdrawal credentials against the provided message. hashedFrom := utils.Sha256(change.From[:]) if !bytes.Equal(hashedFrom[1:], wc[1:]) { - return fmt.Errorf("invalid withdrawal credentials") + return errors.New("invalid withdrawal credentials") } // Compute the signing domain and verify the message signature. @@ -517,7 +517,7 @@ func (I *impl) ProcessBlsToExecutionChange( return err } if !valid { - return fmt.Errorf("invalid signature") + return errors.New("invalid signature") } } credentials := wc @@ -658,7 +658,7 @@ func (I *impl) processAttestationPhase0( } if len(committee) != utils.GetBitlistLength(attestation.AggregationBits()) { - return nil, fmt.Errorf("processAttestationPhase0: mismatching aggregation bits size") + return nil, errors.New("processAttestationPhase0: mismatching aggregation bits size") } // Cached so it is performant. proposerIndex, err := s.GetBeaconProposerIndex() @@ -677,12 +677,12 @@ func (I *impl) processAttestationPhase0( // Depending of what slot we are on we put in either the current justified or previous justified. 
if isCurrentAttestation { if !data.Source().Equal(s.CurrentJustifiedCheckpoint()) { - return nil, fmt.Errorf("processAttestationPhase0: mismatching sources") + return nil, errors.New("processAttestationPhase0: mismatching sources") } s.AddCurrentEpochAtteastation(pendingAttestation) } else { if !data.Source().Equal(s.PreviousJustifiedCheckpoint()) { - return nil, fmt.Errorf("processAttestationPhase0: mismatching sources") + return nil, errors.New("processAttestationPhase0: mismatching sources") } s.AddPreviousEpochAttestation(pendingAttestation) } diff --git a/cl/transition/impl/eth2/validation.go b/cl/transition/impl/eth2/validation.go index 7f5c8c25445..05348d735ec 100644 --- a/cl/transition/impl/eth2/validation.go +++ b/cl/transition/impl/eth2/validation.go @@ -17,6 +17,7 @@ package eth2 import ( + "errors" "fmt" "github.com/Giulio2002/bls" @@ -49,7 +50,7 @@ func (I *impl) VerifyBlockSignature(s abstract.BeaconState, block *cltypes.Signe return fmt.Errorf("error validating block signature: %v", err) } if !valid { - return fmt.Errorf("block not valid") + return errors.New("block not valid") } return nil } diff --git a/cl/validator/committee_subscription/committee_subscription.go b/cl/validator/committee_subscription/committee_subscription.go index 1f6859eec83..20841436bac 100644 --- a/cl/validator/committee_subscription/committee_subscription.go +++ b/cl/validator/committee_subscription/committee_subscription.go @@ -18,6 +18,7 @@ package committee_subscription import ( "context" + "errors" "fmt" "sync" "time" @@ -37,13 +38,13 @@ import ( ) var ( - ErrIgnore = fmt.Errorf("ignore") - ErrCommitteeIndexOutOfRange = fmt.Errorf("committee index out of range") - ErrWrongSubnet = fmt.Errorf("attestation is for the wrong subnet") + ErrIgnore = errors.New("ignore") + ErrCommitteeIndexOutOfRange = errors.New("committee index out of range") + ErrWrongSubnet = errors.New("attestation is for the wrong subnet") ErrNotInPropagationRange = fmt.Errorf("attestation is not in 
propagation range. %w", ErrIgnore) - ErrEpochMismatch = fmt.Errorf("epoch mismatch") - ErrExactlyOneBitSet = fmt.Errorf("exactly one aggregation bit should be set") - ErrAggregationBitsMismatch = fmt.Errorf("aggregation bits mismatch committee size") + ErrEpochMismatch = errors.New("epoch mismatch") + ErrExactlyOneBitSet = errors.New("exactly one aggregation bit should be set") + ErrAggregationBitsMismatch = errors.New("aggregation bits mismatch committee size") ) type CommitteeSubscribeMgmt struct { @@ -99,7 +100,7 @@ func (c *CommitteeSubscribeMgmt) AddAttestationSubscription(ctx context.Context, ) headState := c.syncedData.HeadState() if headState == nil { - return fmt.Errorf("head state not available") + return errors.New("head state not available") } log.Debug("Add attestation subscription", "slot", slot, "committeeIndex", cIndex, "isAggregator", p.IsAggregator, "validatorIndex", p.ValidatorIndex) diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go index 447848cb1b5..a89ddabb9e1 100644 --- a/cmd/capcli/cli.go +++ b/cmd/capcli/cli.go @@ -19,6 +19,7 @@ package main import ( "context" "encoding/json" + "errors" "fmt" "io" "math" @@ -208,9 +209,9 @@ func (c *ChainEndpoint) Run(ctx *Context) error { } log.Info("Hooked", "uri", baseUri) // Let's fetch the head first - currentBlock, err := core.RetrieveBlock(ctx, beaconConfig, fmt.Sprintf("%s/head", baseUri), nil) + currentBlock, err := core.RetrieveBlock(ctx, beaconConfig, baseUri+"/head", nil) if err != nil { - return fmt.Errorf("failed to retrieve head: %w, uri: %s", err, fmt.Sprintf("%s/head", baseUri)) + return fmt.Errorf("failed to retrieve head: %w, uri: %s", err, baseUri+"/head") } currentRoot, err := currentBlock.Block.HashSSZ() if err != nil { @@ -388,7 +389,7 @@ func (c *CheckSnapshots) Run(ctx *Context) error { if genesisHeader == nil { log.Warn("beaconIndices up to", "block", to, "caplinSnapIndexMax", csn.IndicesMax()) - return fmt.Errorf("genesis header is nil") + return errors.New("genesis header is 
nil") } previousBlockRoot, err := genesisHeader.Header.HashSSZ() if err != nil { @@ -601,7 +602,7 @@ type ArchiveSanitizer struct { func getHead(beaconApiURL string) (uint64, error) { headResponse := map[string]interface{}{} - req, err := http.NewRequest("GET", fmt.Sprintf("%s/eth/v2/debug/beacon/heads", beaconApiURL), nil) + req, err := http.NewRequest("GET", beaconApiURL+"/eth/v2/debug/beacon/heads", nil) if err != nil { return 0, err } @@ -616,12 +617,12 @@ func getHead(beaconApiURL string) (uint64, error) { } data := headResponse["data"].([]interface{}) if len(data) == 0 { - return 0, fmt.Errorf("no head found") + return 0, errors.New("no head found") } head := data[0].(map[string]interface{}) slotStr, ok := head["slot"].(string) if !ok { - return 0, fmt.Errorf("no slot found") + return 0, errors.New("no slot found") } slot, err := strconv.ParseUint(slotStr, 10, 64) if err != nil { @@ -650,7 +651,7 @@ func getStateRootAtSlot(beaconApiURL string, slot uint64) (libcommon.Hash, error } data := response["data"].(map[string]interface{}) if len(data) == 0 { - return libcommon.Hash{}, fmt.Errorf("no head found") + return libcommon.Hash{}, errors.New("no head found") } rootStr := data["root"].(string) @@ -781,8 +782,8 @@ func (b *BenchmarkNode) Run(ctx *Context) error { for i := uint64(startSlot); i < headSlot; i += uint64(interval) { uri := b.BaseURL + b.Endpoint - uri = strings.Replace(uri, "{slot}", fmt.Sprintf("%d", i), 1) - uri = strings.Replace(uri, "{epoch}", fmt.Sprintf("%d", i/beaconConfig.SlotsPerEpoch), 1) + uri = strings.Replace(uri, "{slot}", strconv.FormatUint(i, 10), 1) + uri = strings.Replace(uri, "{epoch}", strconv.FormatUint(i/beaconConfig.SlotsPerEpoch, 10), 1) elapsed, err := timeRequest(uri, b.Accept, b.Method, b.Body) if err != nil { log.Warn("Failed to benchmark", "error", err, "uri", uri) diff --git a/cmd/commitment-prefix/main.go b/cmd/commitment-prefix/main.go index e192351d857..c4ae16623b1 100644 --- a/cmd/commitment-prefix/main.go +++ 
b/cmd/commitment-prefix/main.go @@ -17,12 +17,14 @@ package main import ( + "errors" "flag" "fmt" "io" "os" "path" "path/filepath" + "strconv" "sync" "github.com/c2h5oh/datasize" @@ -106,7 +108,7 @@ func proceedFiles(files []string) { panic(err) } } - outPath := path.Join(dir, fmt.Sprintf("%s.html", "analysis")) + outPath := path.Join(dir, "analysis.html") fmt.Printf("rendering total graph to %s\n", outPath) f, err := os.Create(outPath) @@ -184,7 +186,7 @@ func extractKVPairFromCompressed(filename string, keysSink chan commitment.Branc for getter.HasNext() { key, _ := getter.Next(nil) if !getter.HasNext() { - return fmt.Errorf("invalid key/value pair during decompression") + return errors.New("invalid key/value pair during decompression") } val, afterValPos := getter.Next(nil) cpair++ @@ -245,7 +247,7 @@ func processCommitmentFile(fpath string) (*overallStat, error) { func prefixLenCountChart(fname string, data *overallStat) *charts.Pie { items := make([]opts.PieData, 0) for prefSize, count := range data.prefCount { - items = append(items, opts.PieData{Name: fmt.Sprintf("%d", prefSize), Value: count}) + items = append(items, opts.PieData{Name: strconv.FormatUint(prefSize, 10), Value: count}) } pie := charts.NewPie() @@ -268,7 +270,7 @@ func fileContentsMapChart(fileName string, data *overallStat) *charts.TreeMap { TreeMap[keysIndex].Children = make([]opts.TreeMapNode, 0) for prefSize, stat := range data.prefixes { TreeMap[keysIndex].Children = append(TreeMap[keysIndex].Children, opts.TreeMapNode{ - Name: fmt.Sprintf("%d", prefSize), + Name: strconv.FormatUint(prefSize, 10), Value: int(stat.KeySize), }) } diff --git a/cmd/devnet/contracts/steps/l1l2transfers.go b/cmd/devnet/contracts/steps/l1l2transfers.go index 0a6d05d3558..08b0ffd0416 100644 --- a/cmd/devnet/contracts/steps/l1l2transfers.go +++ b/cmd/devnet/contracts/steps/l1l2transfers.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "math" "math/big" @@ -132,7 +133,7 @@ func 
GenerateSyncEvents(ctx context.Context, senderName string, numberOfTransfer } if !sendConfirmed { - return fmt.Errorf("No post sync log received") + return errors.New("No post sync log received") } auth.Nonce = (&big.Int{}).Add(auth.Nonce, big.NewInt(1)) diff --git a/cmd/devnet/devnetutils/utils.go b/cmd/devnet/devnetutils/utils.go index 6e89b5ee563..2a475b1fae4 100644 --- a/cmd/devnet/devnetutils/utils.go +++ b/cmd/devnet/devnetutils/utils.go @@ -132,7 +132,7 @@ func RandomInt(max int) int { func NamespaceAndSubMethodFromMethod(method string) (string, string, error) { parts := strings.SplitN(method, "_", 2) if len(parts) != 2 { - return "", "", fmt.Errorf("invalid string to split") + return "", "", errors.New("invalid string to split") } return parts[0], parts[1], nil } diff --git a/cmd/devnet/scenarios/errors.go b/cmd/devnet/scenarios/errors.go index 1aabb477cdb..2e7ce7ab0f1 100644 --- a/cmd/devnet/scenarios/errors.go +++ b/cmd/devnet/scenarios/errors.go @@ -16,10 +16,13 @@ package scenarios -import "fmt" +import ( + "errors" + "fmt" +) // ErrUndefined is returned in case if step definition was not found -var ErrUndefined = fmt.Errorf("step is undefined") +var ErrUndefined = errors.New("step is undefined") type ScenarioError struct { error diff --git a/cmd/devnet/services/accounts/faucet.go b/cmd/devnet/services/accounts/faucet.go index daf0233a754..f5a5c8c6b62 100644 --- a/cmd/devnet/services/accounts/faucet.go +++ b/cmd/devnet/services/accounts/faucet.go @@ -18,6 +18,7 @@ package accounts import ( "context" + "errors" "fmt" "math/big" "strings" @@ -173,7 +174,7 @@ func (f *Faucet) Send(ctx context.Context, destination *accounts.Account, eth fl } if f.transactOpts == nil { - return nil, libcommon.Hash{}, fmt.Errorf("faucet not initialized") + return nil, libcommon.Hash{}, errors.New("faucet not initialized") } node := devnet.SelectNode(ctx) diff --git a/cmd/devnet/services/polygon/checkpoint.go b/cmd/devnet/services/polygon/checkpoint.go index 
c1f9b7b1c6d..cbf809e4436 100644 --- a/cmd/devnet/services/polygon/checkpoint.go +++ b/cmd/devnet/services/polygon/checkpoint.go @@ -61,7 +61,7 @@ func (c CheckpointBlock) GetSignBytes() ([]byte, error) { } return sdk.SortJSON(b)*/ - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } type CheckpointAck struct { @@ -588,7 +588,7 @@ func (h *Heimdall) handleRootHeaderBlock(event *contracts.TestRootChainNewHeader if ack.StartBlock != h.pendingCheckpoint.StartBlock().Uint64() { h.logger.Error("Invalid start block", "startExpected", h.pendingCheckpoint.StartBlock, "startReceived", ack.StartBlock) - return fmt.Errorf("invalid Checkpoint Ack: Invalid start block") + return errors.New("invalid Checkpoint Ack: Invalid start block") } // Return err if start and end matches but contract root hash doesn't match @@ -603,7 +603,7 @@ func (h *Heimdall) handleRootHeaderBlock(event *contracts.TestRootChainNewHeader "rootRecieved", ack.RootHash.String(), ) - return fmt.Errorf("invalid Checkpoint Ack: Invalid root hash") + return errors.New("invalid Checkpoint Ack: Invalid root hash") } h.latestCheckpoint = &ack diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go index 018a2cd0d15..814d5ebf3c4 100644 --- a/cmd/devnet/services/polygon/heimdall.go +++ b/cmd/devnet/services/polygon/heimdall.go @@ -20,7 +20,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "math/big" "net" "net/http" @@ -178,7 +177,7 @@ func (h *Heimdall) FetchSpan(ctx context.Context, spanID uint64) (*heimdall.Span nextSpan.StartBlock = 1 //256 } else { if spanID != uint64(h.currentSpan.Id+1) { - return nil, fmt.Errorf("can't initialize span: non consecutive span") + return nil, errors.New("can't initialize span: non consecutive span") } nextSpan.StartBlock = h.currentSpan.EndBlock + 1 @@ -202,7 +201,7 @@ func (h *Heimdall) FetchSpan(ctx context.Context, spanID uint64) (*heimdall.Span } func (h *Heimdall) FetchLatestSpan(ctx context.Context) 
(*heimdall.Span, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) currentSprintLength() int { @@ -220,47 +219,47 @@ func (h *Heimdall) getSpanOverrideHeight() uint64 { } func (h *Heimdall) FetchCheckpoint(ctx context.Context, number int64) (*heimdall.Checkpoint, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h *Heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*heimdall.Checkpoint, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) FetchMilestone(ctx context.Context, number int64) (*heimdall.Milestone, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) FetchMilestoneCount(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h *Heimdall) FetchFirstMilestoneNum(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h *Heimdall) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (h *Heimdall) FetchLastNoAckMilestone(ctx context.Context) (string, error) { - return "", fmt.Errorf("TODO") + return "", errors.New("TODO") } func (h *Heimdall) FetchMilestoneID(ctx context.Context, milestoneID string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (h *Heimdall) FetchStateSyncEvents(ctx context.Context, fromID uint64, to time.Time, limit int) ([]*heimdall.EventRecordWithTime, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) FetchStateSyncEvent(ctx context.Context, id uint64) (*heimdall.EventRecordWithTime, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } 
func (h *Heimdall) Close() { diff --git a/cmd/devnet/services/polygon/proofgenerator.go b/cmd/devnet/services/polygon/proofgenerator.go index 4c8fb7c12d8..4fd7d98e3f5 100644 --- a/cmd/devnet/services/polygon/proofgenerator.go +++ b/cmd/devnet/services/polygon/proofgenerator.go @@ -83,7 +83,7 @@ func (pg *ProofGenerator) GenerateExitPayload(ctx context.Context, burnTxHash li logger := devnet.Logger(ctx) if pg.heimdall == nil || pg.heimdall.rootChainBinding == nil { - return nil, fmt.Errorf("ProofGenerator not initialized") + return nil, errors.New("ProofGenerator not initialized") } logger.Info("Checking for checkpoint status", "hash", burnTxHash) @@ -95,7 +95,7 @@ func (pg *ProofGenerator) GenerateExitPayload(ctx context.Context, burnTxHash li } if !isCheckpointed { - return nil, fmt.Errorf("eurn transaction has not been checkpointed yet") + return nil, errors.New("burn transaction has not been checkpointed yet") } // build payload for exit @@ -106,11 +106,11 @@ func (pg *ProofGenerator) GenerateExitPayload(ctx context.Context, burnTxHash li return nil, fmt.Errorf("block not included: %w", err) } - return nil, fmt.Errorf("null receipt received") + return nil, errors.New("null receipt received") } if len(result) == 0 { - return nil, fmt.Errorf("null result received") + return nil, errors.New("null result received") } return result, nil @@ -165,11 +165,11 @@ func (pg *ProofGenerator) buildPayloadForExit(ctx context.Context, burnTxHash li node := devnet.SelectBlockProducer(ctx) if node == nil { - return nil, fmt.Errorf("no node available") + return nil, errors.New("no node available") } if index < 0 { - return nil, fmt.Errorf("index must not negative") + return nil, errors.New("index must not be negative") } var receipt *types.Receipt @@ -183,7 +183,7 @@ func (pg *ProofGenerator) buildPayloadForExit(ctx context.Context, burnTxHash li } if lastChildBlockNum < txBlockNum { - return nil, fmt.Errorf("burn transaction has not been checkpointed as yet") + return nil,
errors.New("burn transaction has not been checkpointed as yet") } // step 2- get transaction receipt from txhash and @@ -248,7 +248,7 @@ func (pg *ProofGenerator) buildPayloadForExit(ctx context.Context, burnTxHash li } if logIndex < 0 { - return nil, fmt.Errorf("log not found in receipt") + return nil, errors.New("log not found in receipt") } parentNodesBytes, err := rlp.EncodeToBytes(receiptProof.parentNodes) @@ -329,7 +329,7 @@ func getReceiptProof(ctx context.Context, node requests.RequestGenerator, receip result, parents, ok := receiptsTrie.FindPath(path) if !ok { - return nil, fmt.Errorf("node does not contain the key") + return nil, errors.New("node does not contain the key") } var nodeValue any diff --git a/cmd/devnet/services/polygon/proofgenerator_test.go b/cmd/devnet/services/polygon/proofgenerator_test.go index ecc01979643..9e5eaa5aeed 100644 --- a/cmd/devnet/services/polygon/proofgenerator_test.go +++ b/cmd/devnet/services/polygon/proofgenerator_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "crypto/ecdsa" + "errors" "fmt" "math" "math/big" @@ -200,7 +201,7 @@ func (rg *requestGenerator) GetTransactionReceipt(ctx context.Context, hash libc } } - return nil, fmt.Errorf("tx not found in block") + return nil, errors.New("tx not found in block") } type blockReader struct { @@ -213,7 +214,7 @@ func (reader blockReader) BlockByNumber(ctx context.Context, db kv.Tx, number ui return reader.chain.Blocks[number], nil } - return nil, fmt.Errorf("block not found") + return nil, errors.New("block not found") } func (reader blockReader) HeaderByNumber(ctx context.Context, txn kv.Getter, blockNum uint64) (*types.Header, error) { @@ -221,7 +222,7 @@ func (reader blockReader) HeaderByNumber(ctx context.Context, txn kv.Getter, blo return reader.chain.Headers[blockNum], nil } - return nil, fmt.Errorf("header not found") + return nil, errors.New("header not found") } func TestMerkle(t *testing.T) { diff --git a/cmd/devnet/transactions/block.go 
b/cmd/devnet/transactions/block.go index 47eb2a2bfab..30e59c48fe5 100644 --- a/cmd/devnet/transactions/block.go +++ b/cmd/devnet/transactions/block.go @@ -18,6 +18,7 @@ package transactions import ( "context" + "errors" "fmt" "time" @@ -67,7 +68,7 @@ func searchBlockForHashes( logger := devnet.Logger(ctx) if len(hashmap) == 0 { - return nil, fmt.Errorf("no hashes to search for") + return nil, errors.New("no hashes to search for") } txToBlock := make(map[libcommon.Hash]uint64, len(hashmap)) @@ -76,7 +77,7 @@ func searchBlockForHashes( // get a block from the new heads channel if headsSub == nil { - return nil, fmt.Errorf("no block heads subscription") + return nil, errors.New("no block heads subscription") } var blockCount int @@ -104,7 +105,7 @@ func searchBlockForHashes( logger.Error("Missing Tx", "txHash", h) } - return nil, fmt.Errorf("timeout when searching for tx") + return nil, errors.New("timeout when searching for tx") } } } diff --git a/cmd/diag/ui/ui.go b/cmd/diag/ui/ui.go index e8c7532f781..434251f7fac 100644 --- a/cmd/diag/ui/ui.go +++ b/cmd/diag/ui/ui.go @@ -128,8 +128,8 @@ func runUI(cli *cli.Context) error { } }() - uiUrl := fmt.Sprintf("http://%s", listenUrl) - fmt.Println(text.Hyperlink(uiUrl, fmt.Sprintf("UI running on %s", uiUrl))) + uiUrl := "http://" + listenUrl + fmt.Println(text.Hyperlink(uiUrl, "UI running on "+uiUrl)) wg.Wait() // Wait for the server goroutine to finish return nil diff --git a/cmd/diag/util/util.go b/cmd/diag/util/util.go index 1f571098773..a2077c1d900 100644 --- a/cmd/diag/util/util.go +++ b/cmd/diag/util/util.go @@ -19,6 +19,7 @@ package util import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -43,7 +44,7 @@ func MakeHttpGetCall(ctx context.Context, url string, data interface{}) error { resp, err := client.Do(req) if err != nil { if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("it looks like the Erigon node is not running, is running incorrectly, or you have specified the 
wrong diagnostics URL. If you run the Erigon node with the '--diagnostics.endpoint.addr' or '--diagnostics.endpoint.port' flags, you must also specify the '--debug.addr' flag with the same address and port") + return errors.New("it looks like the Erigon node is not running, is running incorrectly, or you have specified the wrong diagnostics URL. If you run the Erigon node with the '--diagnostics.endpoint.addr' or '--diagnostics.endpoint.port' flags, you must also specify the '--debug.addr' flag with the same address and port") } return err } @@ -57,7 +58,7 @@ func MakeHttpGetCall(ctx context.Context, url string, data interface{}) error { err = json.Unmarshal(body, &data) if err != nil { if err.Error() == "invalid character 'p' after top-level value" { - return fmt.Errorf("diagnostics was not initialized yet. Please try again in a few seconds") + return errors.New("diagnostics was not initialized yet. Please try again in a few seconds") } return err diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index 5e43d8da260..9576ad374c9 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -355,7 +355,7 @@ var torrentCat = &cobra.Command{ Example: "go run ./cmd/downloader torrent_cat ", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { - return fmt.Errorf("please pass .torrent file path by first argument") + return errors.New("please pass .torrent file path by first argument") } fPath := args[0] mi, err := metainfo.LoadFromFile(fPath) @@ -415,7 +415,7 @@ var torrentMagnet = &cobra.Command{ Example: "go run ./cmd/downloader torrent_magnet ", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { - return fmt.Errorf("please pass .torrent file path by first argument") + return errors.New("please pass .torrent file path by first argument") } fPath := args[0] mi, err := metainfo.LoadFromFile(fPath) diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 
f2a161867c5..1a0d89647e2 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -398,21 +398,21 @@ func getTransaction(txJson jsonrpc.RPCTransaction) (types.Transaction, error) { if txJson.Value != nil { value, overflow = uint256.FromBig(txJson.Value.ToInt()) if overflow { - return nil, fmt.Errorf("value field caused an overflow (uint256)") + return nil, errors.New("value field caused an overflow (uint256)") } } if txJson.GasPrice != nil { gasPrice, overflow = uint256.FromBig(txJson.GasPrice.ToInt()) if overflow { - return nil, fmt.Errorf("gasPrice field caused an overflow (uint256)") + return nil, errors.New("gasPrice field caused an overflow (uint256)") } } if txJson.ChainID != nil { chainId, overflow = uint256.FromBig(txJson.ChainID.ToInt()) if overflow { - return nil, fmt.Errorf("chainId field caused an overflow (uint256)") + return nil, errors.New("chainId field caused an overflow (uint256)") } } @@ -448,14 +448,14 @@ func getTransaction(txJson jsonrpc.RPCTransaction) (types.Transaction, error) { if txJson.Tip != nil { tip, overflow = uint256.FromBig(txJson.Tip.ToInt()) if overflow { - return nil, fmt.Errorf("maxPriorityFeePerGas field caused an overflow (uint256)") + return nil, errors.New("maxPriorityFeePerGas field caused an overflow (uint256)") } } if txJson.FeeCap != nil { feeCap, overflow = uint256.FromBig(txJson.FeeCap.ToInt()) if overflow { - return nil, fmt.Errorf("maxFeePerGas field caused an overflow (uint256)") + return nil, errors.New("maxFeePerGas field caused an overflow (uint256)") } } diff --git a/cmd/hack/db/lmdb.go b/cmd/hack/db/lmdb.go index 83490c90058..a4a745641b4 100644 --- a/cmd/hack/db/lmdb.go +++ b/cmd/hack/db/lmdb.go @@ -27,6 +27,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "github.com/erigontech/erigon-lib/kv" @@ -122,14 +123,14 @@ func _64(page []byte, pos int) uint64 { func pagesToString(pages []uint32) (out string) { if len(pages) == 1 { - out += 
fmt.Sprint(pages[0]) + out += strconv.FormatUint(uint64(pages[0]), 10) return } if len(pages) == 2 { - out += fmt.Sprint(pages[0]) + out += strconv.FormatUint(uint64(pages[0]), 10) out += ", " - out += fmt.Sprint(pages[1]) + out += strconv.FormatUint(uint64(pages[1]), 10) return } @@ -166,7 +167,7 @@ func pagesToString(pages []uint32) (out string) { if i < len(container)-1 { out += fmt.Sprintf("%d, ", n) } else { - out += fmt.Sprintf("%d", n) + out += strconv.FormatUint(uint64(n), 10) } } @@ -189,7 +190,7 @@ func pagesToString(pages []uint32) (out string) { if i < len(container)-1 { out += fmt.Sprintf("%d, ", n) } else { - out += fmt.Sprintf("%d", n) + out += strconv.FormatUint(uint64(n), 10) } } @@ -1109,7 +1110,7 @@ func _conditions(f io.ReaderAt, visStream io.Writer, node *mdbx_node, _header *h for _, subNode := range subHeader.nodes { val := string(subNode.data[:subNode.ksize]) - *out += fmt.Sprintf("|%s", val) + *out += "|" + val } *out += "}" @@ -1240,7 +1241,7 @@ func freeDBPages(f io.ReaderAt, visStream io.Writer, freeRoot uint32) error { out += fmt.Sprintf("txid(%v)", txnID) out += fmt.Sprintf("(ON %d OVERFLOW PAGES)=", overflowPages) for i := 0; i < overflowPages; i++ { - out += fmt.Sprintf("%d", int(node.pgno)+i) + out += strconv.Itoa(int(node.pgno) + i) if i+1 < overflowPages { out += ", " } diff --git a/cmd/hack/flow/flow.go b/cmd/hack/flow/flow.go index 2a964c253eb..8486203ae37 100644 --- a/cmd/hack/flow/flow.go +++ b/cmd/hack/flow/flow.go @@ -286,11 +286,11 @@ func batchServer() { } func si64(n int64) string { - return fmt.Sprintf("%v", n) + return strconv.FormatInt(n, 10) } func sui64(n uint64) string { - return fmt.Sprintf("%v", n) + return strconv.FormatUint(n, 10) } /* @@ -742,11 +742,11 @@ type cfgJobResult struct { } func sb(b bool) string { - return fmt.Sprintf("%v", b) + return strconv.FormatBool(b) } func si(i int) string { - return fmt.Sprintf("%v", i) + return strconv.Itoa(i) } func percent(n int, d int) string { diff --git 
a/cmd/hack/hack.go b/cmd/hack/hack.go index f7976433680..2e70e8a1348 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -725,7 +725,7 @@ func chainConfig(name string) error { if chainConfig == nil { return fmt.Errorf("unknown name: %s", name) } - f, err := os.Create(filepath.Join("params", "chainspecs", fmt.Sprintf("%s.json", name))) + f, err := os.Create(filepath.Join("params", "chainspecs", name+".json")) if err != nil { return err } diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go index 8721bec5367..520e5c30bf0 100644 --- a/cmd/integration/commands/refetence_db.go +++ b/cmd/integration/commands/refetence_db.go @@ -20,6 +20,7 @@ import ( "bufio" "bytes" "context" + "encoding/hex" "errors" "fmt" "os" @@ -499,7 +500,7 @@ MainLoop: case <-ctx.Done(): return ctx.Err() case <-commitEvery.C: - logger.Info("Progress", "bucket", bucket, "key", fmt.Sprintf("%x", k)) + logger.Info("Progress", "bucket", bucket, "key", hex.EncodeToString(k)) } } err = fileScanner.Err() diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 08c9a83a737..17a85202228 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -863,7 +863,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { if unwind > 0 { if unwind > s.BlockNumber { - return fmt.Errorf("cannot unwind past 0") + return errors.New("cannot unwind past 0") } u := sync.NewUnwindState(stages.Bodies, s.BlockNumber-unwind, s.BlockNumber, true, false) diff --git a/cmd/pics/pics.go b/cmd/pics/pics.go index 0487fe88900..64ba7fe454a 100644 --- a/cmd/pics/pics.go +++ b/cmd/pics/pics.go @@ -22,6 +22,7 @@ import ( "os" "os/exec" "sort" + "strconv" libcommon "github.com/erigontech/erigon-lib/common" @@ -61,7 +62,7 @@ func prefixGroups1() { visual.StartGraph(f, false) for i, key := range keys { visual.QuadVertical(f, []byte(key), len(key), fmt.Sprintf("q_%x", key)) - visual.Circle(f, 
fmt.Sprintf("e_%d", i), fmt.Sprintf("%d", i), false) + visual.Circle(f, fmt.Sprintf("e_%d", i), strconv.Itoa(i), false) fmt.Fprintf(f, `q_%x -> e_%d; `, key, i) @@ -90,7 +91,7 @@ func prefixGroups2() { visual.StartGraph(f, false) for i, key := range keys { visual.QuadVertical(f, []byte(key), len(key), fmt.Sprintf("q_%x", key)) - visual.Circle(f, fmt.Sprintf("e_%d", i), fmt.Sprintf("%d", i), false) + visual.Circle(f, fmt.Sprintf("e_%d", i), strconv.Itoa(i), false) fmt.Fprintf(f, `q_%x -> e_%d; `, key, i) @@ -176,7 +177,7 @@ q_%x->q_%x; } // Display the key visual.QuadVertical(f, []byte(key), len(key), fmt.Sprintf("q_%x", key)) - visual.Circle(f, fmt.Sprintf("e_%d", i), fmt.Sprintf("%d", i), false) + visual.Circle(f, fmt.Sprintf("e_%d", i), strconv.Itoa(i), false) fmt.Fprintf(f, `q_%x -> e_%d; `, key, i) @@ -218,7 +219,7 @@ func prefixGroups4() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) } @@ -258,7 +259,7 @@ func prefixGroups5() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) folds = append(folds, hexKey) @@ -300,7 +301,7 @@ func prefixGroups6() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) folds = append(folds, hexKey) @@ -343,7 +344,7 @@ func prefixGroups7() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) folds = append(folds, hexKey) @@ -388,7 +389,7 @@ func prefixGroups8() { for j := 0; j < len(hexKey); j++ { hexKey[j] = 
key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) switch i { diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index ba9d8f8c83d..f4bacc271fd 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -324,7 +324,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger stateCache kvcache.Cache, blockReader services.FullBlockReader, engine consensus.EngineReader, ff *rpchelper.Filters, err error) { if !cfg.WithDatadir && cfg.PrivateApiAddr == "" { - return nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("either remote db or local db must be specified") + return nil, nil, nil, nil, nil, nil, nil, ff, errors.New("either remote db or local db must be specified") } creds, err := grpcutil.TLS(cfg.TLSCACert, cfg.TLSCertfile, cfg.TLSKeyFile) if err != nil { @@ -389,7 +389,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger return nil, nil, nil, nil, nil, nil, nil, ff, err } if cc == nil { - return nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("chain config not found in db. Need start erigon at least once on this db") + return nil, nil, nil, nil, nil, nil, nil, ff, errors.New("chain config not found in db. Need start erigon at least once on this db") } cfg.Snap.Enabled = cfg.Snap.Enabled || cfg.Sync.UseSnapshots if !cfg.Snap.Enabled { diff --git a/cmd/rpcdaemon/graphql/graph/schema.resolvers.go b/cmd/rpcdaemon/graphql/graph/schema.resolvers.go index 52b79fb03d6..9932c43bd13 100644 --- a/cmd/rpcdaemon/graphql/graph/schema.resolvers.go +++ b/cmd/rpcdaemon/graphql/graph/schema.resolvers.go @@ -20,7 +20,7 @@ import ( // SendRawTransaction is the resolver for the sendRawTransaction field. 
func (r *mutationResolver) SendRawTransaction(ctx context.Context, data string) (string, error) { - panic(fmt.Errorf("not implemented: SendRawTransaction - sendRawTransaction")) + panic("not implemented: SendRawTransaction - sendRawTransaction") } // Block is the resolver for the block field. @@ -208,32 +208,32 @@ func (r *queryResolver) Blocks(ctx context.Context, from *uint64, to *uint64) ([ // Pending is the resolver for the pending field. func (r *queryResolver) Pending(ctx context.Context) (*model.Pending, error) { - panic(fmt.Errorf("not implemented: Pending - pending")) + panic("not implemented: Pending - pending") } // Transaction is the resolver for the transaction field. func (r *queryResolver) Transaction(ctx context.Context, hash string) (*model.Transaction, error) { - panic(fmt.Errorf("not implemented: Transaction - transaction")) + panic("not implemented: Transaction - transaction") } // Logs is the resolver for the logs field. func (r *queryResolver) Logs(ctx context.Context, filter model.FilterCriteria) ([]*model.Log, error) { - panic(fmt.Errorf("not implemented: Logs - logs")) + panic("not implemented: Logs - logs") } // GasPrice is the resolver for the gasPrice field. func (r *queryResolver) GasPrice(ctx context.Context) (string, error) { - panic(fmt.Errorf("not implemented: GasPrice - gasPrice")) + panic("not implemented: GasPrice - gasPrice") } // MaxPriorityFeePerGas is the resolver for the maxPriorityFeePerGas field. func (r *queryResolver) MaxPriorityFeePerGas(ctx context.Context) (string, error) { - panic(fmt.Errorf("not implemented: MaxPriorityFeePerGas - maxPriorityFeePerGas")) + panic("not implemented: MaxPriorityFeePerGas - maxPriorityFeePerGas") } // Syncing is the resolver for the syncing field. func (r *queryResolver) Syncing(ctx context.Context) (*model.SyncState, error) { - panic(fmt.Errorf("not implemented: Syncing - syncing")) + panic("not implemented: Syncing - syncing") } // ChainID is the resolver for the chainID field. 
diff --git a/cmd/rpcdaemon/health/check_block.go b/cmd/rpcdaemon/health/check_block.go index b6cab8ed2b1..877a515d131 100644 --- a/cmd/rpcdaemon/health/check_block.go +++ b/cmd/rpcdaemon/health/check_block.go @@ -18,6 +18,7 @@ package health import ( "context" + "errors" "fmt" "github.com/erigontech/erigon/rpc" @@ -25,7 +26,7 @@ import ( func checkBlockNumber(blockNumber rpc.BlockNumber, api EthAPI) error { if api == nil { - return fmt.Errorf("no connection to the Erigon server or `eth` namespace isn't enabled") + return errors.New("no connection to the Erigon server or `eth` namespace isn't enabled") } data, err := api.GetBlockByNumber(context.TODO(), blockNumber, false) if err != nil { diff --git a/cmd/rpcdaemon/health/check_peers.go b/cmd/rpcdaemon/health/check_peers.go index e5b0587525c..d073fd76ca8 100644 --- a/cmd/rpcdaemon/health/check_peers.go +++ b/cmd/rpcdaemon/health/check_peers.go @@ -28,7 +28,7 @@ var ( func checkMinPeers(minPeerCount uint, api NetAPI) error { if api == nil { - return fmt.Errorf("no connection to the Erigon server or `net` namespace isn't enabled") + return errors.New("no connection to the Erigon server or `net` namespace isn't enabled") } peerCount, err := api.PeerCount(context.TODO()) diff --git a/cmd/rpcdaemon/rpcservices/eth_backend.go b/cmd/rpcdaemon/rpcservices/eth_backend.go index 67eea96a16e..1a1ccee5e59 100644 --- a/cmd/rpcdaemon/rpcservices/eth_backend.go +++ b/cmd/rpcdaemon/rpcservices/eth_backend.go @@ -327,7 +327,7 @@ func (back *RemoteBackend) Span(ctx context.Context, tx kv.Getter, spanId uint64 } func (r *RemoteBackend) LastMilestoneId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBackend) Milestone(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { @@ -335,7 +335,7 @@ func (r *RemoteBackend) Milestone(ctx context.Context, tx kv.Getter, spanId uint } func (r *RemoteBackend) 
LastCheckpointId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBackend) Checkpoint(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { diff --git a/cmd/rpctest/rpctest/bench1.go b/cmd/rpctest/rpctest/bench1.go index d394c681439..cf9632b0ed7 100644 --- a/cmd/rpctest/rpctest/bench1.go +++ b/cmd/rpctest/rpctest/bench1.go @@ -19,6 +19,7 @@ package rpctest import ( "bytes" "encoding/base64" + "errors" "fmt" "net/http" "os" @@ -161,7 +162,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro printStorageRange(sm) fmt.Printf("================smg\n") printStorageRange(smg) - return fmt.Errorf("Storage range different\n") + return errors.New("Storage range different\n") } } } @@ -229,7 +230,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro fmt.Printf("Different receipts block %d, txn %s\n", bn, txn.Hash) print(client, routes[Geth], reqGen.getTransactionReceipt(txn.Hash)) print(client, routes[Erigon], reqGen.getTransactionReceipt(txn.Hash)) - return fmt.Errorf("Receipts are different\n") + return errors.New("Receipts are different\n") } } } @@ -323,7 +324,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro fmt.Printf("Different next page keys: %x geth %x", page, pageGeth) } if !compareAccountRanges(accRangeErigon, accRangeGeth) { - return fmt.Errorf("Different in account ranges tx\n") + return errors.New("Different in account ranges tx\n") } } } diff --git a/cmd/rpctest/rpctest/bench3.go b/cmd/rpctest/rpctest/bench3.go index b9241f1ea47..e283b9ae3e0 100644 --- a/cmd/rpctest/rpctest/bench3.go +++ b/cmd/rpctest/rpctest/bench3.go @@ -18,6 +18,7 @@ package rpctest import ( "encoding/base64" + "errors" "fmt" "net/http" "time" @@ -80,7 +81,7 @@ func Bench3(erigon_url, geth_url string) error { } if !compareAccountRanges(accRangeTG, 
accRangeGeth) { - return fmt.Errorf("Different in account ranges tx\n") + return errors.New("Different in account ranges tx\n") } fmt.Println("debug_accountRanges... OK!") @@ -164,7 +165,7 @@ func Bench3(erigon_url, geth_url string) error { } fmt.Printf("storageRange g: %d\n", len(smg)) if !compareStorageRanges(sm, smg) { - return fmt.Errorf("Different in storage ranges tx\n") + return errors.New("Different in storage ranges tx\n") } return nil diff --git a/cmd/rpctest/rpctest/bench7.go b/cmd/rpctest/rpctest/bench7.go index 8167b1c6211..a4eee0e8414 100644 --- a/cmd/rpctest/rpctest/bench7.go +++ b/cmd/rpctest/rpctest/bench7.go @@ -17,6 +17,7 @@ package rpctest import ( + "errors" "fmt" "net/http" "time" @@ -90,7 +91,7 @@ func Bench7(erigonURL, gethURL string) error { printStorageRange(sm) fmt.Printf("================smg\n") printStorageRange(smg) - return fmt.Errorf("storage are different") + return errors.New("storage are different") } fmt.Printf("storageRanges: %d\n", len(sm)) return nil diff --git a/cmd/sentinel/sentinelcli/cliSettings.go b/cmd/sentinel/sentinelcli/cliSettings.go index f65c85e0f89..0a155340d9d 100644 --- a/cmd/sentinel/sentinelcli/cliSettings.go +++ b/cmd/sentinel/sentinelcli/cliSettings.go @@ -17,6 +17,7 @@ package sentinelcli import ( + "errors" "fmt" "github.com/erigontech/erigon/cl/clparams" @@ -60,7 +61,7 @@ func SetupSentinelCli(ctx *cli.Context) (*SentinelCliCfg, error) { return nil, err } if ctx.String(sentinelflags.GenesisSSZFlag.Name) == "" { - return nil, fmt.Errorf("no genesis file provided") + return nil, errors.New("no genesis file provided") } } diff --git a/cmd/snapshots/cmp/cmp.go b/cmd/snapshots/cmp/cmp.go index f34fca1fed0..de5a49f5a07 100644 --- a/cmd/snapshots/cmp/cmp.go +++ b/cmd/snapshots/cmp/cmp.go @@ -19,6 +19,7 @@ package cmp import ( "bytes" "context" + "errors" "fmt" "io/fs" "os" @@ -221,11 +222,11 @@ func cmp(cliCtx *cli.Context) error { } if session1 == nil { - return fmt.Errorf("no first session established") + 
return errors.New("no first session established") } if session1 == nil { - return fmt.Errorf("no second session established") + return errors.New("no second session established") } logger.Info(fmt.Sprintf("Starting compare: %s==%s", loc1.String(), loc2.String()), "first", firstBlock, "last", lastBlock, "types", snapTypes, "dir", tempDir) @@ -439,7 +440,7 @@ func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2 g.SetLimit(2) g.Go(func() error { - logger.Info(fmt.Sprintf("Downloading %s", ent1.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + logger.Info("Downloading "+ent1.Name(), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) startTime := time.Now() defer func() { atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() @@ -460,7 +461,7 @@ func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2 atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent2.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents)), "size", datasize.ByteSize(ent2Info.Size())) + logger.Info("Downloading "+ent2.Name(), "entry", fmt.Sprint(i2+1, "/", len(f2ents)), "size", datasize.ByteSize(ent2Info.Size())) err := c.session2.Download(gctx, ent2.Name()) if err != nil { @@ -615,7 +616,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent1.Body.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + logger.Info("Downloading "+ent1.Body.Name(), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) return c.session1.Download(ctx, ent1.Body.Name()) }() @@ -631,7 +632,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Indexing %s", ent1.Body.Name())) + logger.Info("Indexing " + ent1.Body.Name()) return
coresnaptype.Bodies.BuildIndexes(ctx, info, c.chainConfig(), c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) }) @@ -647,7 +648,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en defer func() { atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent1.Transactions.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + logger.Info("Downloading "+ent1.Transactions.Name(), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) return c.session1.Download(ctx, ent1.Transactions.Name()) }() @@ -670,7 +671,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Indexing %s", ent1.Transactions.Name())) + logger.Info("Indexing " + ent1.Transactions.Name()) return coresnaptype.Transactions.BuildIndexes(ctx, info, c.chainConfig(), c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) }) @@ -690,7 +691,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent2.Body.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) + logger.Info("Downloading "+ent2.Body.Name(), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) return c.session2.Download(ctx, ent2.Body.Name()) }() @@ -706,7 +707,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Indexing %s", ent2.Body.Name())) + logger.Info("Indexing " + ent2.Body.Name()) return coresnaptype.Bodies.BuildIndexes(ctx, info, c.chainConfig(), c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) }) @@ -724,7 +725,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - 
logger.Info(fmt.Sprintf("Downloading %s", ent2.Transactions.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) + logger.Info("Downloading "+ent2.Transactions.Name(), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) return c.session2.Download(ctx, ent2.Transactions.Name()) }() @@ -747,7 +748,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Indexing %s", ent2.Transactions.Name())) + logger.Info("Indexing " + ent2.Transactions.Name()) return coresnaptype.Transactions.BuildIndexes(ctx, info, c.chainConfig(), c.session2.LocalFsRoot(), nil, log.LvlDebug, logger) }) diff --git a/cmd/snapshots/copy/copy.go b/cmd/snapshots/copy/copy.go index f3b714c5762..99c722c9360 100644 --- a/cmd/snapshots/copy/copy.go +++ b/cmd/snapshots/copy/copy.go @@ -18,6 +18,7 @@ package copy import ( "context" + "errors" "fmt" "io/fs" "path/filepath" @@ -125,7 +126,7 @@ func copy(cliCtx *cli.Context) error { switch dst.LType { case sync.TorrentFs: - return fmt.Errorf("can't copy to torrent - need intermediate local fs") + return errors.New("can't copy to torrent - need intermediate local fs") case sync.RemoteFs: if rcCli == nil { @@ -238,26 +239,26 @@ func copy(cliCtx *cli.Context) error { } func torrentToLocal(torrentCli *sync.TorrentClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func torrentToRemote(torrentCli *sync.TorrentClient, rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func localToRemote(rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { 
- return fmt.Errorf("TODO") + return errors.New("TODO") } func localToLocal(src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func remoteToLocal(ctx context.Context, rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { logger := sync.Logger(ctx) if rcCli == nil { - return fmt.Errorf("no remote downloader") + return errors.New("no remote downloader") } session, err := rcCli.NewSession(ctx, dst.Root, src.Src+":"+src.Root, nil) @@ -281,7 +282,7 @@ func remoteToLocal(ctx context.Context, rcCli *downloader.RCloneClient, src *syn } func remoteToRemote(rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } type sinf struct { diff --git a/cmd/snapshots/manifest/manifest.go b/cmd/snapshots/manifest/manifest.go index 2e84d0c892d..617b442a19b 100644 --- a/cmd/snapshots/manifest/manifest.go +++ b/cmd/snapshots/manifest/manifest.go @@ -20,6 +20,7 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io/fs" "os" @@ -99,7 +100,7 @@ func manifest(cliCtx *cli.Context, command string) error { pos := 0 if cliCtx.Args().Len() == 0 { - return fmt.Errorf("missing manifest location") + return errors.New("missing manifest location") } arg := cliCtx.Args().Get(pos) @@ -144,7 +145,7 @@ func manifest(cliCtx *cli.Context, command string) error { } if src != nil && srcSession == nil { - return fmt.Errorf("no src session established") + return errors.New("no src session established") } logger.Debug("Starting manifest " + command) diff --git a/cmd/snapshots/sync/sync.go b/cmd/snapshots/sync/sync.go index fdec668f448..a4f87d06f8d 100644 --- a/cmd/snapshots/sync/sync.go +++ 
b/cmd/snapshots/sync/sync.go @@ -19,6 +19,7 @@ package sync import ( "bufio" "context" + "errors" "fmt" "io/fs" "os" @@ -139,7 +140,7 @@ func ParseLocator(value string) (*Locator, error) { }, nil } - return nil, fmt.Errorf("Invalid locator syntax") + return nil, errors.New("Invalid locator syntax") } type TorrentClient struct { diff --git a/cmd/snapshots/torrents/torrents.go b/cmd/snapshots/torrents/torrents.go index b5ba219e168..49e20614fa8 100644 --- a/cmd/snapshots/torrents/torrents.go +++ b/cmd/snapshots/torrents/torrents.go @@ -18,6 +18,7 @@ package torrents import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -136,7 +137,7 @@ func torrents(cliCtx *cli.Context, command string) error { } if src == nil { - return fmt.Errorf("missing data source") + return errors.New("missing data source") } var rcCli *downloader.RCloneClient @@ -188,7 +189,7 @@ func torrents(cliCtx *cli.Context, command string) error { } if src != nil && srcSession == nil { - return fmt.Errorf("no src session established") + return errors.New("no src session established") } logger.Debug("Starting torrents " + command) @@ -199,14 +200,14 @@ func torrents(cliCtx *cli.Context, command string) error { case "update": startTime := time.Now() - logger.Info(fmt.Sprintf("Starting update: %s", src.String()), "first", firstBlock, "last", lastBlock, "dir", tempDir) + logger.Info("Starting update: "+src.String(), "first", firstBlock, "last", lastBlock, "dir", tempDir) err := updateTorrents(cliCtx.Context, srcSession, firstBlock, lastBlock, logger) if err == nil { - logger.Info(fmt.Sprintf("Finished update: %s", src.String()), "elapsed", time.Since(startTime)) + logger.Info("Finished update: "+src.String(), "elapsed", time.Since(startTime)) } else { - logger.Info(fmt.Sprintf("Aborted update: %s", src.String()), "err", err) + logger.Info("Aborted update: "+src.String(), "err", err) } return err @@ -214,14 +215,14 @@ func torrents(cliCtx *cli.Context, command string) error { case "verify": startTime := 
time.Now() - logger.Info(fmt.Sprintf("Starting verify: %s", src.String()), "first", firstBlock, "last", lastBlock, "dir", tempDir) + logger.Info("Starting verify: "+src.String(), "first", firstBlock, "last", lastBlock, "dir", tempDir) err := verifyTorrents(cliCtx.Context, srcSession, firstBlock, lastBlock, logger) if err == nil { - logger.Info(fmt.Sprintf("Verified: %s", src.String()), "elapsed", time.Since(startTime)) + logger.Info("Verified: "+src.String(), "elapsed", time.Since(startTime)) } else { - logger.Info(fmt.Sprintf("Verification failed: %s", src.String()), "err", err) + logger.Info("Verification failed: "+src.String(), "err", err) } return err @@ -388,7 +389,7 @@ func updateTorrents(ctx context.Context, srcSession *downloader.RCloneSession, f } } - logger.Info(fmt.Sprintf("Updating %s", file+".torrent")) + logger.Info("Updating " + file + ".torrent") err := srcSession.Download(gctx, file) @@ -445,7 +446,7 @@ func verifyTorrents(ctx context.Context, srcSession *downloader.RCloneSession, f } } - logger.Info(fmt.Sprintf("Validating %s", file+".torrent")) + logger.Info("Validating " + file + ".torrent") var mi *metainfo.MetaInfo diff --git a/cmd/snapshots/verify/verify.go b/cmd/snapshots/verify/verify.go index a1e6099ea22..06f76b81a04 100644 --- a/cmd/snapshots/verify/verify.go +++ b/cmd/snapshots/verify/verify.go @@ -17,6 +17,7 @@ package verify import ( + "errors" "fmt" "os" "path/filepath" @@ -249,11 +250,11 @@ func verify(cliCtx *cli.Context) error { } if src != nil && srcSession == nil { - return fmt.Errorf("no src session established") + return errors.New("no src session established") } if dstSession == nil { - return fmt.Errorf("no dst session established") + return errors.New("no dst session established") } if srcSession == nil { @@ -264,5 +265,5 @@ func verify(cliCtx *cli.Context) error { } func verifySnapshots(srcSession sync.DownloadSession, rcSession sync.DownloadSession, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, 
manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } diff --git a/common/types.go b/common/types.go index 8f8dfee50f4..ae31348b168 100644 --- a/common/types.go +++ b/common/types.go @@ -24,7 +24,6 @@ import ( "encoding/hex" "encoding/json" "errors" - "fmt" "reflect" "strings" @@ -104,9 +103,9 @@ func (ma *MixedcaseAddress) UnmarshalJSON(input []byte) error { // MarshalJSON marshals the original value func (ma *MixedcaseAddress) MarshalJSON() ([]byte, error) { if strings.HasPrefix(ma.original, "0x") || strings.HasPrefix(ma.original, "0X") { - return json.Marshal(fmt.Sprintf("0x%s", ma.original[2:])) + return json.Marshal("0x" + ma.original[2:]) } - return json.Marshal(fmt.Sprintf("0x%s", ma.original)) + return json.Marshal("0x" + ma.original) } // Address returns the address @@ -117,9 +116,9 @@ func (ma *MixedcaseAddress) Address() libcommon.Address { // String implements fmt.Stringer func (ma *MixedcaseAddress) String() string { if ma.ValidChecksum() { - return fmt.Sprintf("%s [chksum ok]", ma.original) + return ma.original + " [chksum ok]" } - return fmt.Sprintf("%s [chksum INVALID]", ma.original) + return ma.original + " [chksum INVALID]" } // ValidChecksum returns true if the address has valid checksum diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go index 6cf3b41202c..1318e7c5dc2 100644 --- a/consensus/aura/aura.go +++ b/consensus/aura/aura.go @@ -251,11 +251,11 @@ func NewAuRa(spec *chain.AuRaConfig, db kv.RwDB) (*AuRa, error) { } if _, ok := auraParams.StepDurations[0]; !ok { - return nil, fmt.Errorf("authority Round step 0 duration is undefined") + return nil, errors.New("authority Round step 0 duration is undefined") } for _, v := range auraParams.StepDurations { if v == 0 { - return nil, fmt.Errorf("authority Round step duration cannot be 0") + return nil, errors.New("authority Round step duration cannot be 0") } } //shouldTimeout := auraParams.StartStep == nil @@ -276,7 +276,7 @@ func NewAuRa(spec *chain.AuRaConfig, 
db kv.RwDB) (*AuRa, error) { dur := auraParams.StepDurations[time] step, t, ok := nextStepTimeDuration(durInfo, time) if !ok { - return nil, fmt.Errorf("timestamp overflow") + return nil, errors.New("timestamp overflow") } durInfo.TransitionStep = step durInfo.TransitionTimestamp = t @@ -1059,7 +1059,7 @@ func (c *AuRa) epochSet(chain consensus.ChainHeaderReader, e *NonTransactionalEp finalityChecker, epochTransitionNumber, ok := c.EpochManager.zoomToAfter(chain, e, c.cfg.Validators, h.ParentHash, call) if !ok { - return nil, 0, fmt.Errorf("unable to zoomToAfter to epoch") + return nil, 0, errors.New("unable to zoomToAfter to epoch") } return finalityChecker.signers, epochTransitionNumber, nil } diff --git a/consensus/aura/rolling_finality.go b/consensus/aura/rolling_finality.go index 3071fefafe7..24d2b7a8aa3 100644 --- a/consensus/aura/rolling_finality.go +++ b/consensus/aura/rolling_finality.go @@ -18,6 +18,7 @@ package aura import ( "container/list" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -72,7 +73,7 @@ func (f *RollingFinality) clear() { func (f *RollingFinality) push(head libcommon.Hash, num uint64, signers []libcommon.Address) (newlyFinalized []unAssembledHeader, err error) { for i := range signers { if !f.hasSigner(signers[i]) { - return nil, fmt.Errorf("unknown validator") + return nil, errors.New("unknown validator") } } diff --git a/consensus/aura/validators.go b/consensus/aura/validators.go index a3c63044037..1fa742f73da 100644 --- a/consensus/aura/validators.go +++ b/consensus/aura/validators.go @@ -18,6 +18,7 @@ package aura import ( "container/list" + "errors" "fmt" "math" "sort" @@ -333,7 +334,7 @@ func (s *SimpleList) defaultCaller(blockHash libcommon.Hash) (Call, error) { } func (s *SimpleList) getWithCaller(parentHash libcommon.Hash, nonce uint, caller consensus.Call) (libcommon.Address, error) { if len(s.validators) == 0 { - return libcommon.Address{}, fmt.Errorf("cannot operate with an empty validator set") + 
return libcommon.Address{}, errors.New("cannot operate with an empty validator set") } return s.validators[nonce%uint(len(s.validators))], nil } diff --git a/consensus/merge/merge.go b/consensus/merge/merge.go index 3c3f5a51f3f..c63ad926014 100644 --- a/consensus/merge/merge.go +++ b/consensus/merge/merge.go @@ -208,13 +208,13 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat return nil, nil, nil, fmt.Errorf("error: invalid requests root hash in header, expected: %v, got :%v", header.RequestsRoot, rh) } if !reflect.DeepEqual(requestsInBlock.Deposits(), depositReqs.Deposits()) { - return nil, nil, nil, fmt.Errorf("error: invalid EIP-6110 Deposit Requests in block") + return nil, nil, nil, errors.New("error: invalid EIP-6110 Deposit Requests in block") } if !reflect.DeepEqual(requestsInBlock.Withdrawals(), withdrawalReqs.Withdrawals()) { - return nil, nil, nil, fmt.Errorf("error: invalid EIP-7002 Withdrawal requests in block") + return nil, nil, nil, errors.New("error: invalid EIP-7002 Withdrawal requests in block") } if !reflect.DeepEqual(requestsInBlock.Consolidations(), consolidations.Consolidations()) { - return nil, nil, nil, fmt.Errorf("error: invalid EIP-7251 Consolidation requests in block") + return nil, nil, nil, errors.New("error: invalid EIP-7251 Consolidation requests in block") } } } @@ -301,7 +301,7 @@ func (s *Merge) verifyHeader(chain consensus.ChainHeaderReader, header, parent * // Verify existence / non-existence of withdrawalsHash shanghai := chain.Config().IsShanghai(header.Time) if shanghai && header.WithdrawalsHash == nil { - return fmt.Errorf("missing withdrawalsHash") + return errors.New("missing withdrawalsHash") } if !shanghai && header.WithdrawalsHash != nil { return consensus.ErrUnexpectedWithdrawals @@ -321,7 +321,7 @@ func (s *Merge) verifyHeader(chain consensus.ChainHeaderReader, header, parent * // Verify existence / non-existence of requestsRoot prague := chain.Config().IsPrague(header.Time) if 
prague && header.RequestsRoot == nil { - return fmt.Errorf("missing requestsRoot") + return errors.New("missing requestsRoot") } if !prague && header.RequestsRoot != nil { return consensus.ErrUnexpectedRequests diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go index 1035df0fe29..a08383e3a54 100644 --- a/consensus/misc/eip1559.go +++ b/consensus/misc/eip1559.go @@ -20,6 +20,7 @@ package misc import ( + "errors" "fmt" "math/big" @@ -50,7 +51,7 @@ func VerifyEip1559Header(config *chain.Config, parent, header *types.Header, ski } // Verify the header is not malformed if header.BaseFee == nil { - return fmt.Errorf("header is missing baseFee") + return errors.New("header is missing baseFee") } // Verify the baseFee is correct based on the parent header. expectedBaseFee := CalcBaseFee(config, parent) @@ -69,7 +70,7 @@ func (f eip1559Calculator) CurrentFees(chainConfig *chain.Config, db kv.Getter) hash := rawdb.ReadHeadHeaderHash(db) if hash == (common.Hash{}) { - return 0, 0, 0, 0, fmt.Errorf("can't get head header hash") + return 0, 0, 0, 0, errors.New("can't get head header hash") } currentHeader, err := rawdb.ReadHeaderByHash(db, hash) diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go index 076947788a4..f4db487e198 100644 --- a/consensus/misc/eip4844.go +++ b/consensus/misc/eip4844.go @@ -20,6 +20,7 @@ package misc import ( + "errors" "fmt" "github.com/holiman/uint256" @@ -77,13 +78,13 @@ func FakeExponential(factor, denom *uint256.Int, excessBlobGas uint64) (*uint256 // VerifyPresenceOfCancunHeaderFields checks that the fields introduced in Cancun (EIP-4844, EIP-4788) are present. 
func VerifyPresenceOfCancunHeaderFields(header *types.Header) error { if header.BlobGasUsed == nil { - return fmt.Errorf("header is missing blobGasUsed") + return errors.New("header is missing blobGasUsed") } if header.ExcessBlobGas == nil { - return fmt.Errorf("header is missing excessBlobGas") + return errors.New("header is missing excessBlobGas") } if header.ParentBeaconBlockRoot == nil { - return fmt.Errorf("header is missing parentBeaconBlockRoot") + return errors.New("header is missing parentBeaconBlockRoot") } return nil } diff --git a/core/chain_makers.go b/core/chain_makers.go index dd1aa555b3a..2eecd6af003 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -22,6 +22,7 @@ package core import ( "context" "encoding/binary" + "errors" "fmt" "math/big" @@ -407,7 +408,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E block := types.NewBlock(b.header, b.txs, b.uncles, b.receipts, nil /* withdrawals */, nil /*requests*/) return block, b.receipts, nil } - return nil, nil, fmt.Errorf("no engine to generate blocks") + return nil, nil, errors.New("no engine to generate blocks") } for i := 0; i < n; i++ { diff --git a/core/evm.go b/core/evm.go index 19a6a9af169..34dcd500316 100644 --- a/core/evm.go +++ b/core/evm.go @@ -20,7 +20,6 @@ package core import ( - "fmt" "math/big" "github.com/holiman/uint256" @@ -49,7 +48,7 @@ func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) libco if header.BaseFee != nil { overflow := baseFee.SetFromBig(header.BaseFee) if overflow { - panic(fmt.Errorf("header.BaseFee higher than 2^256-1")) + panic("header.BaseFee higher than 2^256-1") } } diff --git a/core/genesis_write.go b/core/genesis_write.go index 8df012674db..61d8f141e7b 100644 --- a/core/genesis_write.go +++ b/core/genesis_write.go @@ -24,6 +24,7 @@ import ( "crypto/ecdsa" "embed" "encoding/json" + "errors" "fmt" "math/big" "os" @@ -203,7 +204,7 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, 
tmpDir string, logger log.L stateWriter = state.NewNoopWriter() if block.Number().Sign() != 0 { - return nil, statedb, fmt.Errorf("can't commit genesis block with number > 0") + return nil, statedb, errors.New("can't commit genesis block with number > 0") } if err := statedb.CommitBlock(&chain.Rules{}, stateWriter); err != nil { return nil, statedb, fmt.Errorf("cannot write state: %w", err) diff --git a/core/snaptype/block_types.go b/core/snaptype/block_types.go index ad88a13a080..f20d1843547 100644 --- a/core/snaptype/block_types.go +++ b/core/snaptype/block_types.go @@ -257,7 +257,7 @@ var ( // TODO review this code, test pass with lhs+1 <= baseTxnID.U64()+ti for body.BaseTxnID.LastSystemTx(body.TxCount) < baseTxnID.U64()+ti { // skip empty blocks; ti here is not transaction index in one block, but total transaction index counter if !bodyGetter.HasNext() { - return fmt.Errorf("not enough bodies") + return errors.New("not enough bodies") } bodyBuf, _ = bodyGetter.Next(bodyBuf[:0]) diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index 5cec10b79f1..2b5cd6a8a18 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -21,6 +21,7 @@ package state import ( + "errors" "fmt" "sort" @@ -195,7 +196,7 @@ func (sdb *IntraBlockState) AddRefund(gas uint64) { func (sdb *IntraBlockState) SubRefund(gas uint64) { sdb.journal.append(refundChange{prev: sdb.refund}) if gas > sdb.refund { - sdb.setErrorUnsafe(fmt.Errorf("refund counter below zero")) + sdb.setErrorUnsafe(errors.New("refund counter below zero")) } sdb.refund -= gas } diff --git a/core/types/authorization.go b/core/types/authorization.go index 3cd545e59a4..93b1ce11062 100644 --- a/core/types/authorization.go +++ b/core/types/authorization.go @@ -100,7 +100,7 @@ func (ath *Authorization) RecoverSigner(data *bytes.Buffer, b []byte) (*libcommo } if !crypto.ValidateSignatureValues(sig[64], &ath.R, &ath.S, false) { - return nil, fmt.Errorf("invalid signature") + 
return nil, errors.New("invalid signature") } pubkey, err := crypto.Ecrecover(hash.Bytes(), sig[:]) diff --git a/core/types/blob_tx.go b/core/types/blob_tx.go index fad937dc37b..a2f4f78a660 100644 --- a/core/types/blob_tx.go +++ b/core/types/blob_tx.go @@ -82,7 +82,7 @@ func (stx *BlobTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (Me if baseFee != nil { overflow := msg.gasPrice.SetFromBig(baseFee) if overflow { - return msg, fmt.Errorf("gasPrice higher than 2^256-1") + return msg, errors.New("gasPrice higher than 2^256-1") } } msg.gasPrice.Add(&msg.gasPrice, stx.Tip) @@ -361,7 +361,7 @@ func (stx *BlobTx) DecodeRLP(s *rlp.Stream) error { return err } if len(stx.BlobVersionedHashes) == 0 { - return fmt.Errorf("a blob stx must contain at least one blob") + return errors.New("a blob stx must contain at least one blob") } // decode V if b, err = s.Uint256Bytes(); err != nil { diff --git a/core/types/blob_tx_wrapper.go b/core/types/blob_tx_wrapper.go index f55a5064e3c..0a914c065f1 100644 --- a/core/types/blob_tx_wrapper.go +++ b/core/types/blob_tx_wrapper.go @@ -17,6 +17,7 @@ package types import ( + "errors" "fmt" "io" "math/big" @@ -275,7 +276,7 @@ func (txw *BlobTxWrapper) ValidateBlobTransactionWrapper() error { blobTx := txw.Tx l1 := len(blobTx.BlobVersionedHashes) if l1 == 0 { - return fmt.Errorf("a blob txn must contain at least one blob") + return errors.New("a blob txn must contain at least one blob") } l2 := len(txw.Commitments) l3 := len(txw.Blobs) diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go index 7cd85881e08..38b787bd7e6 100644 --- a/core/types/dynamic_fee_tx.go +++ b/core/types/dynamic_fee_tx.go @@ -358,7 +358,7 @@ func (tx *DynamicFeeTransaction) AsMessage(s Signer, baseFee *big.Int, rules *ch if baseFee != nil { overflow := msg.gasPrice.SetFromBig(baseFee) if overflow { - return msg, fmt.Errorf("gasPrice higher than 2^256-1") + return msg, errors.New("gasPrice higher than 2^256-1") } } 
msg.gasPrice.Add(&msg.gasPrice, tx.Tip) diff --git a/core/types/log_test.go b/core/types/log_test.go index 1ea5275c671..e90c9b693c0 100644 --- a/core/types/log_test.go +++ b/core/types/log_test.go @@ -21,7 +21,7 @@ package types import ( "encoding/json" - "fmt" + "errors" "reflect" "testing" @@ -104,7 +104,7 @@ var unmarshalLogTests = map[string]struct { }, "missing data": { input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","timestamp":"0x57a53d3a","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615","0x000000000000000000000000f9dff387dcb5cc4cca5b91adb07a95f54e9f1bb6"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, - wantError: fmt.Errorf("missing required field 'data' for Log"), + wantError: errors.New("missing required field 'data' for Log"), }, } diff --git a/core/types/receipt.go b/core/types/receipt.go index bfbfb0701a3..ed88a6df26e 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -456,7 +456,7 @@ func (r *Receipt) DeriveFieldsV3ForSingleReceipt(txnIdx int, blockHash libcommon sender, ok := txn.cachedSender() if !ok { - return fmt.Errorf("tx must have cached sender") + return errors.New("tx must have cached sender") } blockNumber := new(big.Int).SetUint64(blockNum) diff --git a/core/types/request.go b/core/types/request.go index c34aae02183..c8d7cfa6c22 100644 --- a/core/types/request.go +++ b/core/types/request.go @@ -42,7 +42,7 @@ type Request interface { func decode(data []byte) (Request, error) { if len(data) <= 1 { - return nil, fmt.Errorf("error: too short type request") + return nil, errors.New("error: too short type request") } var req Request switch data[0] { @@ -81,9 +81,9 @@ func (r *Requests) DecodeRLP(s *rlp.Stream) (err error) { } 
switch kind { case rlp.List: - return fmt.Errorf("error: untyped request (unexpected lit)") + return errors.New("error: untyped request (unexpected lit)") case rlp.Byte: - return fmt.Errorf("error: too short request") + return errors.New("error: too short request") default: var buf []byte if buf, err = s.Bytes(); err != nil { diff --git a/core/types/transaction.go b/core/types/transaction.go index 0b7a413d552..a5be3bef561 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -175,7 +175,7 @@ func DecodeTransaction(data []byte) (Transaction, error) { return nil, err } if s.Remaining() != 0 { - return nil, fmt.Errorf("trailing bytes after rlp encoded transaction") + return nil, errors.New("trailing bytes after rlp encoded transaction") } return tx, nil } @@ -211,7 +211,7 @@ func UnmarshalTransactionFromBinary(data []byte, blobTxnsAreWrappedWithBlobs boo return nil, err } if s.Remaining() != 0 { - return nil, fmt.Errorf("trailing bytes after rlp encoded transaction") + return nil, errors.New("trailing bytes after rlp encoded transaction") } return t, nil } diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go index ccd4263ef3c..d95cca228e1 100644 --- a/core/types/transaction_marshalling.go +++ b/core/types/transaction_marshalling.go @@ -296,21 +296,21 @@ func (tx *LegacyTx) UnmarshalJSON(input []byte) error { } overflow = tx.V.SetFromBig(dec.V.ToInt()) if overflow { - return fmt.Errorf("dec.V higher than 2^256-1") + return errors.New("dec.V higher than 2^256-1") } if dec.R == nil { return errors.New("missing required field 'r' in transaction") } overflow = tx.R.SetFromBig(dec.R.ToInt()) if overflow { - return fmt.Errorf("dec.R higher than 2^256-1") + return errors.New("dec.R higher than 2^256-1") } if dec.S == nil { return errors.New("missing required field 's' in transaction") } overflow = tx.S.SetFromBig(dec.S.ToInt()) if overflow { - return fmt.Errorf("dec.S higher than 2^256-1") + return errors.New("dec.S 
higher than 2^256-1") } if overflow { return errors.New("'s' in transaction does not fit in 256 bits") @@ -375,21 +375,21 @@ func (tx *AccessListTx) UnmarshalJSON(input []byte) error { } overflow = tx.V.SetFromBig(dec.V.ToInt()) if overflow { - return fmt.Errorf("dec.V higher than 2^256-1") + return errors.New("dec.V higher than 2^256-1") } if dec.R == nil { return errors.New("missing required field 'r' in transaction") } overflow = tx.R.SetFromBig(dec.R.ToInt()) if overflow { - return fmt.Errorf("dec.R higher than 2^256-1") + return errors.New("dec.R higher than 2^256-1") } if dec.S == nil { return errors.New("missing required field 's' in transaction") } overflow = tx.S.SetFromBig(dec.S.ToInt()) if overflow { - return fmt.Errorf("dec.S higher than 2^256-1") + return errors.New("dec.S higher than 2^256-1") } withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero() if withSignature { @@ -451,21 +451,21 @@ func (tx *DynamicFeeTransaction) unmarshalJson(dec txJSON) error { } overflow = tx.V.SetFromBig(dec.V.ToInt()) if overflow { - return fmt.Errorf("dec.V higher than 2^256-1") + return errors.New("dec.V higher than 2^256-1") } if dec.R == nil { return errors.New("missing required field 'r' in transaction") } overflow = tx.R.SetFromBig(dec.R.ToInt()) if overflow { - return fmt.Errorf("dec.R higher than 2^256-1") + return errors.New("dec.R higher than 2^256-1") } if dec.S == nil { return errors.New("missing required field 's' in transaction") } overflow = tx.S.SetFromBig(dec.S.ToInt()) if overflow { - return fmt.Errorf("dec.S higher than 2^256-1") + return errors.New("dec.S higher than 2^256-1") } if overflow { return errors.New("'s' in transaction does not fit in 256 bits") @@ -581,21 +581,21 @@ func UnmarshalBlobTxJSON(input []byte) (Transaction, error) { } overflow = tx.V.SetFromBig(dec.V.ToInt()) if overflow { - return nil, fmt.Errorf("dec.V higher than 2^256-1") + return nil, errors.New("dec.V higher than 2^256-1") } if dec.R == nil { return nil, 
errors.New("missing required field 'r' in transaction") } overflow = tx.R.SetFromBig(dec.R.ToInt()) if overflow { - return nil, fmt.Errorf("dec.R higher than 2^256-1") + return nil, errors.New("dec.R higher than 2^256-1") } if dec.S == nil { return nil, errors.New("missing required field 's' in transaction") } overflow = tx.S.SetFromBig(dec.S.ToInt()) if overflow { - return nil, fmt.Errorf("dec.S higher than 2^256-1") + return nil, errors.New("dec.S higher than 2^256-1") } withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero() diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 737f2086816..dadd362960f 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -45,7 +45,7 @@ func MakeSigner(config *chain.Config, blockNumber uint64, blockTime uint64) *Sig if config.ChainID != nil { overflow := chainId.SetFromBig(config.ChainID) if overflow { - panic(fmt.Errorf("chainID higher than 2^256-1")) + panic("chainID higher than 2^256-1") } } signer.unprotected = true @@ -108,7 +108,7 @@ func LatestSigner(config *chain.Config) *Signer { signer.unprotected = true chainId, overflow := uint256.FromBig(config.ChainID) if overflow { - panic(fmt.Errorf("chainID higher than 2^256-1")) + panic("chainID higher than 2^256-1") } signer.chainID.Set(chainId) signer.chainIDMul.Mul(chainId, u256.Num2) @@ -147,7 +147,7 @@ func LatestSignerForChainID(chainID *big.Int) *Signer { } chainId, overflow := uint256.FromBig(chainID) if overflow { - panic(fmt.Errorf("chainID higher than 2^256-1")) + panic("chainID higher than 2^256-1") } signer.chainID.Set(chainId) signer.chainIDMul.Mul(chainId, u256.Num2) diff --git a/core/vm/absint_cfg.go b/core/vm/absint_cfg.go index 43eb4681496..d2a4c3cb183 100644 --- a/core/vm/absint_cfg.go +++ b/core/vm/absint_cfg.go @@ -291,7 +291,7 @@ func (state *astate) String(abbrev bool) string { if len(values) > 1 { e = fmt.Sprintf("{%v}", strings.Join(elm, ",")) } else { - e = 
fmt.Sprintf("%v", strings.Join(elm, ",")) + e = strings.Join(elm, ",") } elms = append(elms, e) } diff --git a/core/vm/eips.go b/core/vm/eips.go index 4ed10b4cf0d..ea0903297c8 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -22,6 +22,7 @@ package vm import ( "fmt" "sort" + "strconv" "github.com/holiman/uint256" @@ -66,7 +67,7 @@ func ValidEip(eipNum int) bool { func ActivateableEips() []string { var nums []string //nolint:prealloc for k := range activators { - nums = append(nums, fmt.Sprintf("%d", k)) + nums = append(nums, strconv.Itoa(k)) } sort.Strings(nums) return nums diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 247788fb0e2..87e889d642c 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -20,6 +20,7 @@ package vm import ( + "errors" "fmt" "math" @@ -518,7 +519,7 @@ func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) var overflow bool v, overflow = uint256.FromBig(interpreter.evm.Context.Difficulty) if overflow { - return nil, fmt.Errorf("interpreter.evm.Context.Difficulty higher than 2^256-1") + return nil, errors.New("interpreter.evm.Context.Difficulty higher than 2^256-1") } } scope.Stack.Push(v) diff --git a/crypto/crypto.go b/crypto/crypto.go index a0e8ebc4e0e..01cd6c06ae4 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -157,11 +157,11 @@ func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) { // The priv.D must < N if priv.D.Cmp(secp256k1NBig) >= 0 { - return nil, fmt.Errorf("invalid private key, >=N") + return nil, errors.New("invalid private key, >=N") } // The priv.D must not be zero or negative. 
if priv.D.Sign() <= 0 { - return nil, fmt.Errorf("invalid private key, zero or negative") + return nil, errors.New("invalid private key, zero or negative") } priv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d) @@ -248,7 +248,7 @@ func LoadECDSA(file string) (*ecdsa.PrivateKey, error) { if err != nil { return nil, err } else if n != len(buf) { - return nil, fmt.Errorf("key file too short, want 64 hex characters") + return nil, errors.New("key file too short, want 64 hex characters") } if err := checkKeyFileEnd(r); err != nil { return nil, err diff --git a/crypto/ecies/ecies.go b/crypto/ecies/ecies.go index abb1c7ffa0e..40383f98eff 100644 --- a/crypto/ecies/ecies.go +++ b/crypto/ecies/ecies.go @@ -36,7 +36,7 @@ import ( "crypto/hmac" "crypto/subtle" "encoding/binary" - "fmt" + "errors" "hash" "io" "math/big" @@ -45,11 +45,11 @@ import ( ) var ( - ErrImport = fmt.Errorf("ecies: failed to import key") - ErrInvalidCurve = fmt.Errorf("ecies: invalid elliptic curve") - ErrInvalidPublicKey = fmt.Errorf("ecies: invalid public key") - ErrSharedKeyIsPointAtInfinity = fmt.Errorf("ecies: shared key is point at infinity") - ErrSharedKeyTooBig = fmt.Errorf("ecies: shared key params are too big") + ErrImport = errors.New("ecies: failed to import key") + ErrInvalidCurve = errors.New("ecies: invalid elliptic curve") + ErrInvalidPublicKey = errors.New("ecies: invalid public key") + ErrSharedKeyIsPointAtInfinity = errors.New("ecies: shared key is point at infinity") + ErrSharedKeyTooBig = errors.New("ecies: shared key params are too big") ) // PublicKey is a representation of an elliptic curve public key. 
@@ -140,8 +140,8 @@ func (prv *PrivateKey) GenerateShared(pub *PublicKey, skLen, macLen int) (sk []b } var ( - ErrSharedTooLong = fmt.Errorf("ecies: shared secret is too long") - ErrInvalidMessage = fmt.Errorf("ecies: invalid message") + ErrSharedTooLong = errors.New("ecies: shared secret is too long") + ErrInvalidMessage = errors.New("ecies: invalid message") ) // NIST SP 800-56 Concatenation Key Derivation Function (see section 5.8.1). diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go index a0780f2a637..c907e6ccb68 100644 --- a/crypto/ecies/ecies_test.go +++ b/crypto/ecies/ecies_test.go @@ -35,8 +35,8 @@ import ( "crypto/rand" "crypto/sha256" "encoding/hex" + "errors" "flag" - "fmt" "math/big" "os" "testing" @@ -74,7 +74,7 @@ func TestKDF(t *testing.T) { } } -var ErrBadSharedKeys = fmt.Errorf("ecies: shared keys don't match") +var ErrBadSharedKeys = errors.New("ecies: shared keys don't match") // cmpParams compares a set of ECIES parameters. We assume, as per the // docs, that AES is the only supported symmetric encryption algorithm. 
diff --git a/crypto/ecies/params.go b/crypto/ecies/params.go index 756f5343be4..5e69fa5e6bb 100644 --- a/crypto/ecies/params.go +++ b/crypto/ecies/params.go @@ -39,6 +39,7 @@ import ( "crypto/elliptic" "crypto/sha256" "crypto/sha512" + "errors" "fmt" "hash" @@ -47,8 +48,8 @@ import ( var ( DefaultCurve = ethcrypto.S256() - ErrUnsupportedECDHAlgorithm = fmt.Errorf("ecies: unsupported ECDH algorithm") - ErrUnsupportedECIESParameters = fmt.Errorf("ecies: unsupported ECIES parameters") + ErrUnsupportedECDHAlgorithm = errors.New("ecies: unsupported ECDH algorithm") + ErrUnsupportedECIESParameters = errors.New("ecies: unsupported ECIES parameters") ErrInvalidKeyLen = fmt.Errorf("ecies: invalid key size (> %d) in ECIESParams", maxKeyLen) ) diff --git a/crypto/signature_cgo.go b/crypto/signature_cgo.go index f45d44d2e06..e818aeb3680 100644 --- a/crypto/signature_cgo.go +++ b/crypto/signature_cgo.go @@ -24,6 +24,7 @@ package crypto import ( "crypto/ecdsa" "crypto/elliptic" + "errors" "fmt" "github.com/erigontech/secp256k1" @@ -80,7 +81,7 @@ func VerifySignature(pubkey, digestHash, signature []byte) bool { func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) { x, y := secp256k1.DecompressPubkey(pubkey) if x == nil { - return nil, fmt.Errorf("invalid public key") + return nil, errors.New("invalid public key") } return &ecdsa.PublicKey{X: x, Y: y, Curve: S256()}, nil } diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go index 70a125463ee..aff2a209206 100644 --- a/crypto/signature_nocgo.go +++ b/crypto/signature_nocgo.go @@ -76,12 +76,12 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) { return nil, fmt.Errorf("hash is required to be exactly 32 bytes (%d)", len(hash)) } if prv.Curve != btcec.S256() { - return nil, fmt.Errorf("private key curve is not secp256k1") + return nil, errors.New("private key curve is not secp256k1") } // ecdsa.PrivateKey -> btcec.PrivateKey var priv btcec.PrivateKey if overflow := 
priv.Key.SetByteSlice(prv.D.Bytes()); overflow || priv.Key.IsZero() { - return nil, fmt.Errorf("invalid private key") + return nil, errors.New("invalid private key") } defer priv.Zero() sig, err := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey diff --git a/diagnostics/logs.go b/diagnostics/logs.go index 8cb090020e5..b2765ac5f04 100644 --- a/diagnostics/logs.go +++ b/diagnostics/logs.go @@ -119,7 +119,7 @@ func writeLogsRead(w http.ResponseWriter, r *http.Request, dirPath string) { } if fileInfo.IsDir() { - http.Error(w, fmt.Sprintf("%s is a directory, needs to be a file", file), http.StatusInternalServerError) + http.Error(w, file+" is a directory, needs to be a file", http.StatusInternalServerError) return } diff --git a/erigon-lib/commitment/bin_patricia_hashed.go b/erigon-lib/commitment/bin_patricia_hashed.go index 2057234acd7..f39d967452c 100644 --- a/erigon-lib/commitment/bin_patricia_hashed.go +++ b/erigon-lib/commitment/bin_patricia_hashed.go @@ -21,6 +21,7 @@ import ( "context" "encoding/binary" "encoding/hex" + "errors" "fmt" "io" "math/bits" @@ -294,7 +295,7 @@ func (cell *BinaryCell) deriveHashedKeys(depth int, keccak keccakState, accountK extraLen := 0 if cell.apl > 0 { if depth > halfKeySize { - return fmt.Errorf("deriveHashedKeys accountPlainKey present at depth > halfKeySize") + return errors.New("deriveHashedKeys accountPlainKey present at depth > halfKeySize") } extraLen = halfKeySize - depth } @@ -333,9 +334,9 @@ func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags if fieldBits&HashedKeyPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey len") + return 0, errors.New("fillFromFields buffer too small for hashedKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for hashedKey len") + return 0, errors.New("fillFromFields value overflow for hashedKey len") } pos += n if len(data) < pos+int(l) { @@ 
-355,13 +356,13 @@ func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags if fieldBits&AccountPlainPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey len") + return 0, errors.New("fillFromFields buffer too small for accountPlainKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for accountPlainKey len") + return 0, errors.New("fillFromFields value overflow for accountPlainKey len") } pos += n if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey") + return 0, errors.New("fillFromFields buffer too small for accountPlainKey") } cell.apl = int(l) if l > 0 { @@ -374,13 +375,13 @@ func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags if fieldBits&StoragePlainPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey len") + return 0, errors.New("fillFromFields buffer too small for storagePlainKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for storagePlainKey len") + return 0, errors.New("fillFromFields value overflow for storagePlainKey len") } pos += n if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey") + return 0, errors.New("fillFromFields buffer too small for storagePlainKey") } cell.spl = int(l) if l > 0 { @@ -393,13 +394,13 @@ func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags if fieldBits&HashPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for hash len") + return 0, errors.New("fillFromFields buffer too small for hash len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for hash len") + return 0, errors.New("fillFromFields value overflow for hash len") } pos += n if 
len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for hash") + return 0, errors.New("fillFromFields buffer too small for hash") } cell.hl = int(l) if l > 0 { @@ -740,7 +741,7 @@ func (bph *BinPatriciaHashed) computeBinaryCellHash(cell *BinaryCell, depth int, return nil, err } } else { - return nil, fmt.Errorf("computeBinaryCellHash extension without hash") + return nil, errors.New("computeBinaryCellHash extension without hash") } } else if cell.hl > 0 { storageRootHash = cell.h @@ -768,7 +769,7 @@ func (bph *BinPatriciaHashed) computeBinaryCellHash(cell *BinaryCell, depth int, } buf = append(buf, hash[:]...) } else { - return nil, fmt.Errorf("computeBinaryCellHash extension without hash") + return nil, errors.New("computeBinaryCellHash extension without hash") } } else if cell.hl > 0 { buf = append(buf, cell.h[:cell.hl]...) @@ -997,7 +998,7 @@ func (bph *BinPatriciaHashed) needFolding(hashedKey []byte) bool { func (bph *BinPatriciaHashed) fold() (err error) { updateKeyLen := bph.currentKeyLen if bph.activeRows == 0 { - return fmt.Errorf("cannot fold - no active rows") + return errors.New("cannot fold - no active rows") } if bph.trace { fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", bph.activeRows, bph.currentKey[:bph.currentKeyLen], bph.touchMap[bph.activeRows-1], bph.afterMap[bph.activeRows-1]) @@ -1433,7 +1434,7 @@ func (c *BinaryCell) bytes() []byte { func (c *BinaryCell) decodeBytes(buf []byte) error { if len(buf) < 1 { - return fmt.Errorf("invalid buffer size to contain BinaryCell (at least 1 byte expected)") + return errors.New("invalid buffer size to contain BinaryCell (at least 1 byte expected)") } c.fillEmpty() @@ -1497,7 +1498,7 @@ func (bph *BinPatriciaHashed) EncodeCurrentState(buf []byte) ([]byte, error) { // buf expected to be encoded hph state. Decode state and set up hph to that state. 
func (bph *BinPatriciaHashed) SetState(buf []byte) error { if bph.activeRows != 0 { - return fmt.Errorf("has active rows, could not reset state") + return errors.New("has active rows, could not reset state") } var s state diff --git a/erigon-lib/commitment/commitment.go b/erigon-lib/commitment/commitment.go index 57aa6039828..dd79402ccc2 100644 --- a/erigon-lib/commitment/commitment.go +++ b/erigon-lib/commitment/commitment.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "math/bits" "strings" @@ -261,14 +262,14 @@ func (be *BranchEncoder) EncodeBranch(bitmap, touchMap, afterMap uint16, readCel return err } if n != wn { - return fmt.Errorf("n != wn size") + return errors.New("n != wn size") } wn, err = be.buf.Write(val) if err != nil { return err } if len(val) != wn { - return fmt.Errorf("wn != value size") + return errors.New("wn != value size") } return nil } @@ -357,14 +358,14 @@ func (branchData BranchData) ReplacePlainKeys(newData []byte, fn func(key []byte if fieldBits&HashedKeyPart != 0 { l, n := binary.Uvarint(branchData[pos:]) if n == 0 { - return nil, fmt.Errorf("replacePlainKeys buffer too small for hashedKey len") + return nil, errors.New("replacePlainKeys buffer too small for hashedKey len") } else if n < 0 { - return nil, fmt.Errorf("replacePlainKeys value overflow for hashedKey len") + return nil, errors.New("replacePlainKeys value overflow for hashedKey len") } newData = append(newData, branchData[pos:pos+n]...) pos += n if len(branchData) < pos+int(l) { - return nil, fmt.Errorf("replacePlainKeys buffer too small for hashedKey") + return nil, errors.New("replacePlainKeys buffer too small for hashedKey") } if l > 0 { newData = append(newData, branchData[pos:pos+int(l)]...) 
@@ -374,13 +375,13 @@ func (branchData BranchData) ReplacePlainKeys(newData []byte, fn func(key []byte if fieldBits&AccountPlainPart != 0 { l, n := binary.Uvarint(branchData[pos:]) if n == 0 { - return nil, fmt.Errorf("replacePlainKeys buffer too small for accountPlainKey len") + return nil, errors.New("replacePlainKeys buffer too small for accountPlainKey len") } else if n < 0 { - return nil, fmt.Errorf("replacePlainKeys value overflow for accountPlainKey len") + return nil, errors.New("replacePlainKeys value overflow for accountPlainKey len") } pos += n if len(branchData) < pos+int(l) { - return nil, fmt.Errorf("replacePlainKeys buffer too small for accountPlainKey") + return nil, errors.New("replacePlainKeys buffer too small for accountPlainKey") } if l > 0 { pos += int(l) @@ -407,13 +408,13 @@ func (branchData BranchData) ReplacePlainKeys(newData []byte, fn func(key []byte if fieldBits&StoragePlainPart != 0 { l, n := binary.Uvarint(branchData[pos:]) if n == 0 { - return nil, fmt.Errorf("replacePlainKeys buffer too small for storagePlainKey len") + return nil, errors.New("replacePlainKeys buffer too small for storagePlainKey len") } else if n < 0 { - return nil, fmt.Errorf("replacePlainKeys value overflow for storagePlainKey len") + return nil, errors.New("replacePlainKeys value overflow for storagePlainKey len") } pos += n if len(branchData) < pos+int(l) { - return nil, fmt.Errorf("replacePlainKeys buffer too small for storagePlainKey") + return nil, errors.New("replacePlainKeys buffer too small for storagePlainKey") } if l > 0 { pos += int(l) @@ -440,14 +441,14 @@ func (branchData BranchData) ReplacePlainKeys(newData []byte, fn func(key []byte if fieldBits&HashPart != 0 { l, n := binary.Uvarint(branchData[pos:]) if n == 0 { - return nil, fmt.Errorf("replacePlainKeys buffer too small for hash len") + return nil, errors.New("replacePlainKeys buffer too small for hash len") } else if n < 0 { - return nil, fmt.Errorf("replacePlainKeys value overflow for hash len") 
+ return nil, errors.New("replacePlainKeys value overflow for hash len") } newData = append(newData, branchData[pos:pos+n]...) pos += n if len(branchData) < pos+int(l) { - return nil, fmt.Errorf("replacePlainKeys buffer too small for hash") + return nil, errors.New("replacePlainKeys buffer too small for hash") } if l > 0 { newData = append(newData, branchData[pos:pos+int(l)]...) @@ -501,14 +502,14 @@ func (branchData BranchData) MergeHexBranches(branchData2 BranchData, newData [] for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { l, n := binary.Uvarint(branchData2[pos2:]) if n == 0 { - return nil, fmt.Errorf("MergeHexBranches buffer2 too small for field") + return nil, errors.New("MergeHexBranches buffer2 too small for field") } else if n < 0 { - return nil, fmt.Errorf("MergeHexBranches value2 overflow for field") + return nil, errors.New("MergeHexBranches value2 overflow for field") } newData = append(newData, branchData2[pos2:pos2+n]...) pos2 += n if len(branchData2) < pos2+int(l) { - return nil, fmt.Errorf("MergeHexBranches buffer2 too small for field") + return nil, errors.New("MergeHexBranches buffer2 too small for field") } if l > 0 { newData = append(newData, branchData2[pos2:pos2+int(l)]...) @@ -526,16 +527,16 @@ func (branchData BranchData) MergeHexBranches(branchData2 BranchData, newData [] for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { l, n := binary.Uvarint(branchData[pos1:]) if n == 0 { - return nil, fmt.Errorf("MergeHexBranches buffer1 too small for field") + return nil, errors.New("MergeHexBranches buffer1 too small for field") } else if n < 0 { - return nil, fmt.Errorf("MergeHexBranches value1 overflow for field") + return nil, errors.New("MergeHexBranches value1 overflow for field") } if add { newData = append(newData, branchData[pos1:pos1+n]...) 
} pos1 += n if len(branchData) < pos1+int(l) { - return nil, fmt.Errorf("MergeHexBranches buffer1 too small for field") + return nil, errors.New("MergeHexBranches buffer1 too small for field") } if l > 0 { if add { @@ -562,7 +563,7 @@ func (branchData BranchData) DecodeCells() (touchMap, afterMap uint16, row [16]* pos++ row[nibble] = new(Cell) if pos, err = row[nibble].fillFromFields(branchData, pos, fieldBits); err != nil { - err = fmt.Errorf("faield to fill cell at nibble %x: %w", nibble, err) + err = fmt.Errorf("failed to fill cell at nibble %x: %w", nibble, err) return } } @@ -616,9 +617,9 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { l, n := binary.Uvarint(branch2[pos2:]) if n == 0 { - return nil, fmt.Errorf("MergeHexBranches branch2 is too small: expected node info size") + return nil, errors.New("MergeHexBranches branch2 is too small: expected node info size") } else if n < 0 { - return nil, fmt.Errorf("MergeHexBranches branch2: size overflow for length") + return nil, errors.New("MergeHexBranches branch2: size overflow for length") } m.buf = append(m.buf, branch2[pos2:pos2+n]...) 
@@ -644,9 +645,9 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { l, n := binary.Uvarint(branch1[pos1:]) if n == 0 { - return nil, fmt.Errorf("MergeHexBranches branch1 is too small: expected node info size") + return nil, errors.New("MergeHexBranches branch1 is too small: expected node info size") } else if n < 0 { - return nil, fmt.Errorf("MergeHexBranches branch1: size overflow for length") + return nil, errors.New("MergeHexBranches branch1: size overflow for length") } if add { diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go index 5f29890e0e8..dfaee4c5b31 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -21,6 +21,7 @@ import ( "context" "encoding/binary" "encoding/hex" + "errors" "fmt" "hash" "io" @@ -261,7 +262,7 @@ func (cell *Cell) deriveHashedKeys(depth int, keccak keccakState, accountKeyLen extraLen := 0 if cell.accountPlainKeyLen > 0 { if depth > 64 { - return fmt.Errorf("deriveHashedKeys accountPlainKey present at depth > 64") + return errors.New("deriveHashedKeys accountPlainKey present at depth > 64") } extraLen = 64 - depth } @@ -303,9 +304,9 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if fieldBits&HashedKeyPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey len") + return 0, errors.New("fillFromFields buffer too small for hashedKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for hashedKey len") + return 0, errors.New("fillFromFields value overflow for hashedKey len") } pos += n if len(data) < pos+int(l) { @@ -325,13 +326,13 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if fieldBits&AccountPlainPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - 
return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey len") + return 0, errors.New("fillFromFields buffer too small for accountPlainKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for accountPlainKey len") + return 0, errors.New("fillFromFields value overflow for accountPlainKey len") } pos += n if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey") + return 0, errors.New("fillFromFields buffer too small for accountPlainKey") } cell.accountPlainKeyLen = int(l) if l > 0 { @@ -344,13 +345,13 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if fieldBits&StoragePlainPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey len") + return 0, errors.New("fillFromFields buffer too small for storagePlainKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for storagePlainKey len") + return 0, errors.New("fillFromFields value overflow for storagePlainKey len") } pos += n if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey") + return 0, errors.New("fillFromFields buffer too small for storagePlainKey") } cell.storagePlainKeyLen = int(l) if l > 0 { @@ -363,13 +364,13 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if fieldBits&HashPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for hash len") + return 0, errors.New("fillFromFields buffer too small for hash len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for hash len") + return 0, errors.New("fillFromFields value overflow for hash len") } pos += n if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for hash") + return 0, errors.New("fillFromFields buffer too small for 
hash") } cell.HashLen = int(l) if l > 0 { @@ -719,7 +720,7 @@ func (hph *HexPatriciaHashed) computeCellHash(cell *Cell, depth int, buf []byte) return nil, err } } else { - return nil, fmt.Errorf("computeCellHash extension without hash") + return nil, errors.New("computeCellHash extension without hash") } } else if cell.HashLen > 0 { storageRootHash = cell.hash @@ -747,7 +748,7 @@ func (hph *HexPatriciaHashed) computeCellHash(cell *Cell, depth int, buf []byte) } buf = append(buf, hash[:]...) } else { - return nil, fmt.Errorf("computeCellHash extension without hash") + return nil, errors.New("computeCellHash extension without hash") } } else if cell.HashLen > 0 { buf = append(buf, cell.hash[:cell.HashLen]...) @@ -991,7 +992,7 @@ func (hph *HexPatriciaHashed) needFolding(hashedKey []byte) bool { func (hph *HexPatriciaHashed) fold() (err error) { updateKeyLen := hph.currentKeyLen if hph.activeRows == 0 { - return fmt.Errorf("cannot fold - no active rows") + return errors.New("cannot fold - no active rows") } if hph.trace { fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", hph.activeRows, hph.currentKey[:hph.currentKeyLen], hph.touchMap[hph.activeRows-1], hph.afterMap[hph.activeRows-1]) @@ -1779,7 +1780,7 @@ const ( func (cell *Cell) Decode(buf []byte) error { if len(buf) < 1 { - return fmt.Errorf("invalid buffer size to contain Cell (at least 1 byte expected)") + return errors.New("invalid buffer size to contain Cell (at least 1 byte expected)") } cell.reset() @@ -1864,7 +1865,7 @@ func (hph *HexPatriciaHashed) SetState(buf []byte) error { return nil } if hph.activeRows != 0 { - return fmt.Errorf("target trie has active rows, could not reset state before fold") + return errors.New("target trie has active rows, could not reset state before fold") } var s state @@ -2188,18 +2189,18 @@ func (u *Update) Encode(buf []byte, numBuf []byte) []byte { func (u *Update) Decode(buf []byte, pos int) (int, error) { if len(buf) < pos+1 { - return 
0, fmt.Errorf("decode Update: buffer too small for flags") + return 0, errors.New("decode Update: buffer too small for flags") } u.Flags = UpdateFlags(buf[pos]) pos++ if u.Flags&BalanceUpdate != 0 { if len(buf) < pos+1 { - return 0, fmt.Errorf("decode Update: buffer too small for balance len") + return 0, errors.New("decode Update: buffer too small for balance len") } balanceLen := int(buf[pos]) pos++ if len(buf) < pos+balanceLen { - return 0, fmt.Errorf("decode Update: buffer too small for balance") + return 0, errors.New("decode Update: buffer too small for balance") } u.Balance.SetBytes(buf[pos : pos+balanceLen]) pos += balanceLen @@ -2208,16 +2209,16 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { var n int u.Nonce, n = binary.Uvarint(buf[pos:]) if n == 0 { - return 0, fmt.Errorf("decode Update: buffer too small for nonce") + return 0, errors.New("decode Update: buffer too small for nonce") } if n < 0 { - return 0, fmt.Errorf("decode Update: nonce overflow") + return 0, errors.New("decode Update: nonce overflow") } pos += n } if u.Flags&CodeUpdate != 0 { if len(buf) < pos+length.Hash { - return 0, fmt.Errorf("decode Update: buffer too small for codeHash") + return 0, errors.New("decode Update: buffer too small for codeHash") } copy(u.CodeHashOrStorage[:], buf[pos:pos+32]) pos += length.Hash @@ -2226,14 +2227,14 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { if u.Flags&StorageUpdate != 0 { l, n := binary.Uvarint(buf[pos:]) if n == 0 { - return 0, fmt.Errorf("decode Update: buffer too small for storage len") + return 0, errors.New("decode Update: buffer too small for storage len") } if n < 0 { - return 0, fmt.Errorf("decode Update: storage pos overflow") + return 0, errors.New("decode Update: storage pos overflow") } pos += n if len(buf) < pos+int(l) { - return 0, fmt.Errorf("decode Update: buffer too small for storage") + return 0, errors.New("decode Update: buffer too small for storage") } u.ValLength = int(l) 
copy(u.CodeHashOrStorage[:], buf[pos:pos+int(l)]) diff --git a/erigon-lib/commitment/patricia_state_mock_test.go b/erigon-lib/commitment/patricia_state_mock_test.go index 7004a6ffcb4..62af90a955f 100644 --- a/erigon-lib/commitment/patricia_state_mock_test.go +++ b/erigon-lib/commitment/patricia_state_mock_test.go @@ -19,6 +19,7 @@ package commitment import ( "encoding/binary" "encoding/hex" + "errors" "fmt" "slices" "testing" @@ -84,7 +85,7 @@ func (ms *MockState) GetAccount(plainKey []byte, cell *Cell) error { } if ex.Flags&StorageUpdate != 0 { ms.t.Logf("GetAccount reading storage item for key [%x]", plainKey) - return fmt.Errorf("storage read by GetAccount") + return errors.New("storage read by GetAccount") } if ex.Flags&DeleteUpdate != 0 { ms.t.Fatalf("GetAccount reading deleted account for key [%x]", plainKey) diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go index 1bd27e1126c..657113d4d73 100644 --- a/erigon-lib/downloader/downloader.go +++ b/erigon-lib/downloader/downloader.go @@ -943,7 +943,7 @@ func (d *Downloader) mainLoop(silent bool) error { } else { downloadComplete <- downloadStatus{ name: fileInfo.Name(), - err: fmt.Errorf("hash check failed"), + err: errors.New("hash check failed"), } d.logger.Warn("[snapshots] Torrent hash does not match file", "file", fileInfo.Name(), "torrent-hash", infoHash, "file-hash", hex.EncodeToString(fileHashBytes)) @@ -1609,7 +1609,7 @@ func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloa func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *webDownloadInfo, statusChan chan downloadStatus) (*RCloneSession, error) { if d.webDownloadClient == nil { - return nil, fmt.Errorf("webdownload client not enabled") + return nil, errors.New("webdownload client not enabled") } peerUrl, err := selectDownloadPeer(d.ctx, peerUrls, t) @@ -1743,7 +1743,7 @@ func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *web func 
selectDownloadPeer(ctx context.Context, peerUrls []*url.URL, t *torrent.Torrent) (string, error) { switch len(peerUrls) { case 0: - return "", fmt.Errorf("no download peers") + return "", errors.New("no download peers") case 1: downloadUrl := peerUrls[0].JoinPath(t.Name()) @@ -1775,7 +1775,7 @@ func selectDownloadPeer(ctx context.Context, peerUrls []*url.URL, t *torrent.Tor } } - return "", fmt.Errorf("can't find download peer") + return "", errors.New("can't find download peer") } func availableTorrents(ctx context.Context, pending []*torrent.Torrent, downloading map[string]*downloadInfo, fileSlots int, pieceSlots int) []*torrent.Torrent { diff --git a/erigon-lib/downloader/downloader_grpc_server.go b/erigon-lib/downloader/downloader_grpc_server.go index 5da11358650..b48497f7806 100644 --- a/erigon-lib/downloader/downloader_grpc_server.go +++ b/erigon-lib/downloader/downloader_grpc_server.go @@ -18,6 +18,7 @@ package downloader import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -60,7 +61,7 @@ func (s *GrpcServer) Add(ctx context.Context, request *proto_downloader.AddReque for i, it := range request.Items { if it.Path == "" { - return nil, fmt.Errorf("field 'path' is required") + return nil, errors.New("field 'path' is required") } select { @@ -91,7 +92,7 @@ func (s *GrpcServer) Delete(ctx context.Context, request *proto_downloader.Delet torrents := s.d.torrentClient.Torrents() for _, name := range request.Paths { if name == "" { - return nil, fmt.Errorf("field 'path' is required") + return nil, errors.New("field 'path' is required") } for _, t := range torrents { select { diff --git a/erigon-lib/downloader/rclone.go b/erigon-lib/downloader/rclone.go index 2957ba4f85b..caa09f7e9b7 100644 --- a/erigon-lib/downloader/rclone.go +++ b/erigon-lib/downloader/rclone.go @@ -110,7 +110,7 @@ func (c *RCloneClient) start(logger log.Logger) error { rclone, _ := exec.LookPath("rclone") if len(rclone) == 0 { - return fmt.Errorf("rclone not found in PATH") + return 
errors.New("rclone not found in PATH") } logger.Info("[downloader] rclone found in PATH: enhanced upload/download enabled") @@ -687,7 +687,7 @@ var ErrAccessDenied = errors.New("access denied") func (c *RCloneSession) ReadRemoteDir(ctx context.Context, refresh bool) ([]fs.DirEntry, error) { if len(c.remoteFs) == 0 { - return nil, fmt.Errorf("remote fs undefined") + return nil, errors.New("remote fs undefined") } c.oplock.Lock() @@ -871,7 +871,7 @@ func (c *RCloneSession) syncFiles(ctx context.Context) { if syncQueue != nil { syncQueue <- request } else { - request.cerr <- fmt.Errorf("no sync queue available") + request.cerr <- errors.New("no sync queue available") } } diff --git a/erigon-lib/downloader/snaptype/files.go b/erigon-lib/downloader/snaptype/files.go index 085a4c13428..4a0c769b086 100644 --- a/erigon-lib/downloader/snaptype/files.go +++ b/erigon-lib/downloader/snaptype/files.go @@ -34,7 +34,7 @@ import ( ) var ( - ErrInvalidFileName = fmt.Errorf("invalid compressed file name") + ErrInvalidFileName = errors.New("invalid compressed file name") ) func FileName(version Version, from, to uint64, fileType string) string { diff --git a/erigon-lib/downloader/snaptype/type.go b/erigon-lib/downloader/snaptype/type.go index f163674e4eb..e4f2fc69aa8 100644 --- a/erigon-lib/downloader/snaptype/type.go +++ b/erigon-lib/downloader/snaptype/type.go @@ -52,7 +52,7 @@ func ParseVersion(v string) (Version, error) { } if len(v) == 0 { - return 0, fmt.Errorf("invalid version: no prefix") + return 0, errors.New("invalid version: no prefix") } return 0, fmt.Errorf("invalid version prefix: %s", v[0:1]) diff --git a/erigon-lib/downloader/util.go b/erigon-lib/downloader/util.go index abde27acfab..5269f33972b 100644 --- a/erigon-lib/downloader/util.go +++ b/erigon-lib/downloader/util.go @@ -21,6 +21,7 @@ import ( "context" "crypto/sha1" "encoding/json" + "errors" "fmt" "io" "os" @@ -494,7 +495,7 @@ func ScheduleVerifyFile(ctx context.Context, t *torrent.Torrent, completePieces if 
change.Err != nil { err = change.Err } else { - err = fmt.Errorf("unexpected piece change error") + err = errors.New("unexpected piece change error") } cancel() diff --git a/erigon-lib/downloader/webseed.go b/erigon-lib/downloader/webseed.go index 0eadbe5105e..1863d42178a 100644 --- a/erigon-lib/downloader/webseed.go +++ b/erigon-lib/downloader/webseed.go @@ -445,8 +445,8 @@ func (d *WebSeeds) ByFileName(name string) (metainfo.UrlList, bool) { return v, ok } -var ErrInvalidEtag = fmt.Errorf("invalid etag") -var ErrEtagNotFound = fmt.Errorf("not found") +var ErrInvalidEtag = errors.New("invalid etag") +var ErrEtagNotFound = errors.New("not found") func (d *WebSeeds) retrieveFileEtag(ctx context.Context, file *url.URL) (string, error) { request, err := http.NewRequestWithContext(ctx, http.MethodHead, file.String(), nil) diff --git a/erigon-lib/etl/etl.go b/erigon-lib/etl/etl.go index 2ecd0fc20ac..366d09b88d0 100644 --- a/erigon-lib/etl/etl.go +++ b/erigon-lib/etl/etl.go @@ -18,6 +18,7 @@ package etl import ( "bytes" + "errors" "fmt" "reflect" "time" @@ -40,7 +41,7 @@ type ExtractFunc func(k []byte, v []byte, next ExtractNextFunc) error // for [0x01, 0x01, 0x01] it will generate [0x01, 0x01, 0x02], etc func NextKey(key []byte) ([]byte, error) { if len(key) == 0 { - return key, fmt.Errorf("could not apply NextKey for the empty key") + return key, errors.New("could not apply NextKey for the empty key") } nextKey := common.Copy(key) for i := len(key) - 1; i >= 0; i-- { @@ -53,7 +54,7 @@ func NextKey(key []byte) ([]byte, error) { nextKey[i] = 0 } } - return key, fmt.Errorf("overflow while applying NextKey") + return key, errors.New("overflow while applying NextKey") } // LoadCommitHandler is a callback called each time a new batch is being diff --git a/erigon-lib/kv/helpers.go b/erigon-lib/kv/helpers.go index 3e6c6361ceb..e73c7f6b18b 100644 --- a/erigon-lib/kv/helpers.go +++ b/erigon-lib/kv/helpers.go @@ -19,7 +19,7 @@ package kv import ( "context" "encoding/binary" - 
"fmt" + "errors" "os" "sync" "sync/atomic" @@ -106,7 +106,7 @@ func bytes2bool(in []byte) bool { return in[0] == 1 } -var ErrChanged = fmt.Errorf("key must not change") +var ErrChanged = errors.New("key must not change") // EnsureNotChangedBool - used to store immutable config flags in db. protects from human mistakes func EnsureNotChangedBool(tx GetPut, bucket string, k []byte, value bool) (ok, enabled bool, err error) { diff --git a/erigon-lib/kv/mdbx/kv_mdbx.go b/erigon-lib/kv/mdbx/kv_mdbx.go index 7aed4bfc5b4..153d29fcb82 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx.go +++ b/erigon-lib/kv/mdbx/kv_mdbx.go @@ -230,7 +230,7 @@ func PathDbMap() map[string]kv.RoDB { return maps.Clone(pathDbMap) } -var ErrDBDoesNotExists = fmt.Errorf("can't create database - because opening in `Accede` mode. probably another (main) process can create it") +var ErrDBDoesNotExists = errors.New("can't create database - because opening in `Accede` mode. probably another (main) process can create it") func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { opts = opts.WriteMap(dbg.WriteMap()) @@ -759,7 +759,7 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { } if !db.trackTxBegin() { - return nil, fmt.Errorf("db closed") + return nil, errors.New("db closed") } // will return nil err if context is cancelled (may appear to acquire the semaphore) @@ -806,7 +806,7 @@ func (db *MdbxKV) beginRw(ctx context.Context, flags uint) (txn kv.RwTx, err err } if !db.trackTxBegin() { - return nil, fmt.Errorf("db closed") + return nil, errors.New("db closed") } runtime.LockOSThread() @@ -1007,7 +1007,7 @@ func (tx *MdbxTx) CreateBucket(name string) error { flags ^= kv.DupSort } if flags != 0 { - return fmt.Errorf("some not supported flag provided for bucket") + return errors.New("some not supported flag provided for bucket") } dbi, err = tx.tx.OpenDBI(name, nativeFlags, nil, nil) diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go 
b/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go index d75e165ffb2..ea33e49b175 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go @@ -18,7 +18,7 @@ package membatchwithdb import ( "bytes" - "fmt" + "errors" "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" @@ -107,7 +107,7 @@ func (m *memoryMutationCursor) getNextOnDb(t NextType) (key []byte, value []byte return } default: - err = fmt.Errorf("invalid next type") + err = errors.New("invalid next type") return } @@ -129,7 +129,7 @@ func (m *memoryMutationCursor) getNextOnDb(t NextType) (key []byte, value []byte return } default: - err = fmt.Errorf("invalid next type") + err = errors.New("invalid next type") return } } diff --git a/erigon-lib/kv/remotedb/kv_remote.go b/erigon-lib/kv/remotedb/kv_remote.go index 86490b25b93..ab511ec51bc 100644 --- a/erigon-lib/kv/remotedb/kv_remote.go +++ b/erigon-lib/kv/remotedb/kv_remote.go @@ -19,6 +19,7 @@ package remotedb import ( "bytes" "context" + "errors" "fmt" "runtime" "unsafe" @@ -191,16 +192,16 @@ func (db *DB) BeginTemporalRo(ctx context.Context) (kv.TemporalTx, error) { return t.(kv.TemporalTx), nil } func (db *DB) BeginRw(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method") + return nil, errors.New("remote db provider doesn't support .BeginRw method") } func (db *DB) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method") + return nil, errors.New("remote db provider doesn't support .BeginRw method") } func (db *DB) BeginTemporalRw(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("remote db provider doesn't support .BeginTemporalRw method") + return nil, errors.New("remote db provider doesn't support .BeginTemporalRw method") } func (db *DB) BeginTemporalRwNosync(ctx context.Context) (kv.RwTx, 
error) { - return nil, fmt.Errorf("remote db provider doesn't support .BeginTemporalRwNosync method") + return nil, errors.New("remote db provider doesn't support .BeginTemporalRwNosync method") } func (db *DB) View(ctx context.Context, f func(tx kv.Tx) error) (err error) { @@ -221,10 +222,10 @@ func (db *DB) ViewTemporal(ctx context.Context, f func(tx kv.TemporalTx) error) } func (db *DB) Update(ctx context.Context, f func(tx kv.RwTx) error) (err error) { - return fmt.Errorf("remote db provider doesn't support .Update method") + return errors.New("remote db provider doesn't support .Update method") } func (db *DB) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) (err error) { - return fmt.Errorf("remote db provider doesn't support .UpdateNosync method") + return errors.New("remote db provider doesn't support .UpdateNosync method") } func (tx *tx) ViewID() uint64 { return tx.viewID } @@ -359,7 +360,7 @@ func (tx *tx) Cursor(bucket string) (kv.Cursor, error) { } func (tx *tx) ListBuckets() ([]string, error) { - return nil, fmt.Errorf("function ListBuckets is not implemented for remoteTx") + return nil, errors.New("function ListBuckets is not implemented for remoteTx") } // func (c *remoteCursor) Put(k []byte, v []byte) error { panic("not supported") } diff --git a/erigon-lib/kv/remotedbserver/remotedbserver.go b/erigon-lib/kv/remotedbserver/remotedbserver.go index be73a5112c4..eed728034aa 100644 --- a/erigon-lib/kv/remotedbserver/remotedbserver.go +++ b/erigon-lib/kv/remotedbserver/remotedbserver.go @@ -535,7 +535,7 @@ func (s *KvServer) DomainGet(_ context.Context, req *remote.DomainGetReq) (reply if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) if !ok { - return fmt.Errorf("server DB doesn't implement kv.Temporal interface") + return errors.New("server DB doesn't implement kv.Temporal interface") } if req.Latest { reply.V, _, err = ttx.DomainGet(domainName, req.K, req.K2) @@ -559,7 +559,7 @@ func (s *KvServer) 
HistorySeek(_ context.Context, req *remote.HistorySeekReq) (r if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) if !ok { - return fmt.Errorf("server DB doesn't implement kv.Temporal interface") + return errors.New("server DB doesn't implement kv.Temporal interface") } reply.V, reply.Ok, err = ttx.HistorySeek(kv.History(req.Table), req.K, req.Ts) if err != nil { @@ -591,7 +591,7 @@ func (s *KvServer) IndexRange(_ context.Context, req *remote.IndexRangeReq) (*re if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) if !ok { - return fmt.Errorf("server DB doesn't implement kv.Temporal interface") + return errors.New("server DB doesn't implement kv.Temporal interface") } it, err := ttx.IndexRange(kv.InvertedIdx(req.Table), req.K, from, int(req.ToTs), order.By(req.OrderAscend), limit) if err != nil { diff --git a/erigon-lib/kv/stream/stream_test.go b/erigon-lib/kv/stream/stream_test.go index a812e7ae6ba..bc9550390f8 100644 --- a/erigon-lib/kv/stream/stream_test.go +++ b/erigon-lib/kv/stream/stream_test.go @@ -19,7 +19,7 @@ package stream_test import ( "bytes" "context" - "fmt" + "errors" "testing" "github.com/erigontech/erigon-lib/kv" @@ -244,7 +244,7 @@ func TestPaginated(t *testing.T) { }) t.Run("error", func(t *testing.T) { i := 0 - testErr := fmt.Errorf("test") + testErr := errors.New("test") s1 := stream.Paginate[uint64](func(pageToken string) (arr []uint64, nextPageToken string, err error) { i++ switch i { @@ -310,7 +310,7 @@ func TestPaginatedDual(t *testing.T) { }) t.Run("error", func(t *testing.T) { i := 0 - testErr := fmt.Errorf("test") + testErr := errors.New("test") s1 := stream.PaginateKV(func(pageToken string) (keys, values [][]byte, nextPageToken string, err error) { i++ switch i { diff --git a/erigon-lib/kv/temporal/historyv2/account_changeset_test.go b/erigon-lib/kv/temporal/historyv2/account_changeset_test.go index c37a800c865..e86634d9d57 100644 --- 
a/erigon-lib/kv/temporal/historyv2/account_changeset_test.go +++ b/erigon-lib/kv/temporal/historyv2/account_changeset_test.go @@ -19,6 +19,7 @@ package historyv2 import ( "bytes" "encoding/hex" + "errors" "fmt" "reflect" "testing" @@ -36,7 +37,7 @@ func TestEncodingAccount(t *testing.T) { ch := m.New() // empty StorageChangeSset first err := m.Encode(1, ch, func(k, v []byte) error { - return fmt.Errorf("must never call") + return errors.New("must never call") }) assert.NoError(t, err) diff --git a/erigon-lib/metrics/parsing.go b/erigon-lib/metrics/parsing.go index 91db2f03222..a838697da34 100644 --- a/erigon-lib/metrics/parsing.go +++ b/erigon-lib/metrics/parsing.go @@ -17,6 +17,7 @@ package metrics import ( + "errors" "fmt" "regexp" "strings" @@ -26,7 +27,7 @@ import ( func parseMetric(s string) (string, prometheus.Labels, error) { if len(s) == 0 { - return "", nil, fmt.Errorf("metric cannot be empty") + return "", nil, errors.New("metric cannot be empty") } ident, rest, ok := strings.Cut(s, "{") diff --git a/erigon-lib/mmap/total_memory_cgroups.go b/erigon-lib/mmap/total_memory_cgroups.go index dbca502d02f..05a4b0be873 100644 --- a/erigon-lib/mmap/total_memory_cgroups.go +++ b/erigon-lib/mmap/total_memory_cgroups.go @@ -88,7 +88,7 @@ func cgroupsV1MemoryLimit() (uint64, error) { if stat, err := cgroup.Stat(); err != nil { return 0, fmt.Errorf("failed to load memory cgroup1 stats: %w", err) } else if stat.Memory == nil || stat.Memory.Usage == nil { - return 0, fmt.Errorf("cgroup1 memory stats are nil; aborting") + return 0, errors.New("cgroup1 memory stats are nil; aborting") } else { return stat.Memory.Usage.Limit, nil } @@ -111,7 +111,7 @@ func cgroupsV2MemoryLimit() (uint64, error) { if stat, err := cgroup.Stat(); err != nil { return 0, fmt.Errorf("failed to load cgroup2 memory stats: %w", err) } else if stat.Memory == nil { - return 0, fmt.Errorf("cgroup2 memory stats are nil; aborting") + return 0, errors.New("cgroup2 memory stats are nil; aborting") } else { 
return stat.Memory.UsageLimit, nil } diff --git a/erigon-lib/recsplit/recsplit.go b/erigon-lib/recsplit/recsplit.go index 27c20ece508..239b57195ba 100644 --- a/erigon-lib/recsplit/recsplit.go +++ b/erigon-lib/recsplit/recsplit.go @@ -21,6 +21,7 @@ import ( "context" "crypto/rand" "encoding/binary" + "errors" "fmt" "io" "math" @@ -39,7 +40,7 @@ import ( "github.com/erigontech/erigon-lib/recsplit/eliasfano32" ) -var ErrCollision = fmt.Errorf("duplicate key") +var ErrCollision = errors.New("duplicate key") const RecSplitLogPrefix = "recsplit" @@ -349,7 +350,7 @@ func (rs *RecSplit) golombParam(m uint16) int { // the slice underlying key is not getting accessed by RecSplit after this invocation. func (rs *RecSplit) AddKey(key []byte, offset uint64) error { if rs.built { - return fmt.Errorf("cannot add keys after perfect hash function had been built") + return errors.New("cannot add keys after perfect hash function had been built") } rs.hasher.Reset() rs.hasher.Write(key) //nolint:errcheck @@ -582,7 +583,7 @@ func (rs *RecSplit) loadFuncOffset(k, _ []byte, _ etl.CurrentTableReader, _ etl. 
// of building the perfect hash function and writing index into a file func (rs *RecSplit) Build(ctx context.Context) error { if rs.built { - return fmt.Errorf("already built") + return errors.New("already built") } if rs.keysAdded != rs.keyExpectedCount { return fmt.Errorf("rs %s expected keys %d, got %d", rs.indexFileName, rs.keyExpectedCount, rs.keysAdded) diff --git a/erigon-lib/rlp/parse.go b/erigon-lib/rlp/parse.go index 5d53bc1b4f6..09545f4347e 100644 --- a/erigon-lib/rlp/parse.go +++ b/erigon-lib/rlp/parse.go @@ -26,7 +26,7 @@ import ( ) var ( - ErrBase = fmt.Errorf("rlp") + ErrBase = errors.New("rlp") ErrParse = fmt.Errorf("%w parse", ErrBase) ErrDecode = fmt.Errorf("%w decode", ErrBase) ) diff --git a/erigon-lib/rlp2/parse.go b/erigon-lib/rlp2/parse.go index b69e7de5a12..bda7935e15a 100644 --- a/erigon-lib/rlp2/parse.go +++ b/erigon-lib/rlp2/parse.go @@ -26,7 +26,7 @@ import ( ) var ( - ErrBase = fmt.Errorf("rlp") + ErrBase = errors.New("rlp") ErrParse = fmt.Errorf("%w parse", ErrBase) ErrDecode = fmt.Errorf("%w decode", ErrBase) ErrUnexpectedEOF = fmt.Errorf("%w EOF", ErrBase) diff --git a/erigon-lib/state/bps_tree.go b/erigon-lib/state/bps_tree.go index 0581ddd621a..bd9fd0b9836 100644 --- a/erigon-lib/state/bps_tree.go +++ b/erigon-lib/state/bps_tree.go @@ -20,9 +20,10 @@ import ( "bytes" "errors" "fmt" - "github.com/c2h5oh/datasize" "unsafe" + "github.com/c2h5oh/datasize" + "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon-lib/recsplit/eliasfano32" @@ -77,7 +78,7 @@ func (it *BpsTreeIterator) Di() uint64 { func (it *BpsTreeIterator) KVFromGetter(g ArchiveGetter) ([]byte, []byte, error) { if it == nil { - return nil, nil, fmt.Errorf("iterator is nil") + return nil, nil, errors.New("iterator is nil") } //fmt.Printf("kv from %p getter %p tree %p offt %d\n", it, g, it.t, it.i) k, v, err := it.t.dataLookupFunc(it.i, g) diff --git a/erigon-lib/state/btree_index.go 
b/erigon-lib/state/btree_index.go index e0d53b6fa33..c42c5a89f53 100644 --- a/erigon-lib/state/btree_index.go +++ b/erigon-lib/state/btree_index.go @@ -614,7 +614,7 @@ func NewBtIndexWriter(args BtIndexWriterArgs, logger log.Logger) (*BtIndexWriter func (btw *BtIndexWriter) AddKey(key []byte, offset uint64) error { if btw.built { - return fmt.Errorf("cannot add keys after perfect hash function had been built") + return errors.New("cannot add keys after perfect hash function had been built") } binary.BigEndian.PutUint64(btw.numBuf[:], offset) @@ -646,7 +646,7 @@ func (btw *BtIndexWriter) loadFuncBucket(k, v []byte, _ etl.CurrentTableReader, // of building the perfect hash function and writing index into a file func (btw *BtIndexWriter) Build() error { if btw.built { - return fmt.Errorf("already built") + return errors.New("already built") } var err error if btw.indexF, err = os.Create(btw.tmpFilePath); err != nil { diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index ac766dd6fd1..68fd2601e31 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -48,7 +48,7 @@ import ( "github.com/erigontech/erigon-lib/types" ) -var ErrBehindCommitment = fmt.Errorf("behind commitment") +var ErrBehindCommitment = errors.New("behind commitment") // KvList sort.Interface to sort write list by keys type KvList struct { @@ -425,12 +425,12 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm storageItem := sto.lookupFileByItsRange(fStartTxNum, fEndTxNum) if storageItem == nil { sd.logger.Crit(fmt.Sprintf("storage file of steps %d-%d not found\n", fStartTxNum/sd.aggTx.a.aggregationStep, fEndTxNum/sd.aggTx.a.aggregationStep)) - return nil, fmt.Errorf("storage file not found") + return nil, errors.New("storage file not found") } accountItem := acc.lookupFileByItsRange(fStartTxNum, fEndTxNum) if accountItem == nil { sd.logger.Crit(fmt.Sprintf("storage file of steps %d-%d not found\n", 
fStartTxNum/sd.aggTx.a.aggregationStep, fEndTxNum/sd.aggTx.a.aggregationStep)) - return nil, fmt.Errorf("account file not found") + return nil, errors.New("account file not found") } storageGetter := NewArchiveGetter(storageItem.decompressor.MakeGetter(), sto.d.compression) accountGetter := NewArchiveGetter(accountItem.decompressor.MakeGetter(), acc.d.compression) @@ -603,7 +603,7 @@ func (sd *SharedDomains) IndexAdd(table kv.InvertedIdx, key []byte) (err error) func (sd *SharedDomains) SetTx(tx kv.Tx) { if tx == nil { - panic(fmt.Errorf("tx is nil")) + panic("tx is nil") } sd.roTx = tx @@ -614,7 +614,7 @@ func (sd *SharedDomains) SetTx(tx kv.Tx) { sd.aggTx = casted.AggTx().(*AggregatorRoTx) if sd.aggTx == nil { - panic(fmt.Errorf("aggtx is nil")) + panic(errors.New("aggtx is nil")) } } @@ -975,7 +975,7 @@ func (sd *SharedDomains) DomainDel(domain kv.Domain, k1, k2 []byte, prevVal []by func (sd *SharedDomains) DomainDelPrefix(domain kv.Domain, prefix []byte) error { if domain != kv.StorageDomain { - return fmt.Errorf("DomainDelPrefix: not supported") + return errors.New("DomainDelPrefix: not supported") } type tuple struct { @@ -1340,7 +1340,7 @@ func (sdc *SharedDomainsCommitmentContext) restorePatriciaState(value []byte) (u fmt.Printf("[commitment] restored state: block=%d txn=%d rootHash=%x\n", cs.blockNum, cs.txNum, rootHash) } } else { - return 0, 0, fmt.Errorf("state storing is only supported hex patricia trie") + return 0, 0, errors.New("state storing is only supported hex patricia trie") } return cs.blockNum, cs.txNum, nil } diff --git a/erigon-lib/txpool/pool.go b/erigon-lib/txpool/pool.go index 7ac81600223..39b063885e5 100644 --- a/erigon-lib/txpool/pool.go +++ b/erigon-lib/txpool/pool.go @@ -468,12 +468,12 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang if assert.Enable { for _, txn := range unwindTxs.Txs { if txn.SenderID == 0 { - panic(fmt.Errorf("onNewBlock.unwindTxs: senderID can't be zero")) + 
panic("onNewBlock.unwindTxs: senderID can't be zero") } } for _, txn := range minedTxs.Txs { if txn.SenderID == 0 { - panic(fmt.Errorf("onNewBlock.minedTxs: senderID can't be zero")) + panic("onNewBlock.minedTxs: senderID can't be zero") } } } @@ -515,7 +515,7 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang func (p *TxPool) processRemoteTxs(ctx context.Context) error { if !p.Started() { - return fmt.Errorf("txpool not started yet") + return errors.New("txpool not started yet") } defer processBatchTxsTimer.ObserveDuration(time.Now()) @@ -1235,7 +1235,7 @@ func (p *TxPool) addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *s if assert.Enable { for _, txn := range newTxs.Txs { if txn.SenderID == 0 { - panic(fmt.Errorf("senderID can't be zero")) + panic("senderID can't be zero") } } } @@ -1293,7 +1293,7 @@ func (p *TxPool) addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, if assert.Enable { for _, txn := range newTxs.Txs { if txn.SenderID == 0 { - panic(fmt.Errorf("senderID can't be zero")) + panic("senderID can't be zero") } } } diff --git a/erigon-lib/txpool/txpool_grpc_server.go b/erigon-lib/txpool/txpool_grpc_server.go index 7d4bfebbe8b..85f90ef0d43 100644 --- a/erigon-lib/txpool/txpool_grpc_server.go +++ b/erigon-lib/txpool/txpool_grpc_server.go @@ -65,7 +65,7 @@ type txPool interface { var _ txpool_proto.TxpoolServer = (*GrpcServer)(nil) // compile-time interface check var _ txpool_proto.TxpoolServer = (*GrpcDisabled)(nil) // compile-time interface check -var ErrPoolDisabled = fmt.Errorf("TxPool Disabled") +var ErrPoolDisabled = errors.New("TxPool Disabled") type GrpcDisabled struct { txpool_proto.UnimplementedTxpoolServer diff --git a/erigon-lib/txpool/txpoolutil/all_components.go b/erigon-lib/txpool/txpoolutil/all_components.go index 85c6c486c3d..22bfddb43a8 100644 --- a/erigon-lib/txpool/txpoolutil/all_components.go +++ b/erigon-lib/txpool/txpoolutil/all_components.go @@ -18,7 +18,7 @@ package 
txpoolutil import ( "context" - "fmt" + "errors" "math/big" "time" @@ -52,7 +52,7 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB } if cc != nil && !force { if cc.ChainID.Uint64() == 0 { - return nil, 0, fmt.Errorf("wrong chain config") + return nil, 0, errors.New("wrong chain config") } return cc, blockNum, nil } @@ -95,7 +95,7 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB return nil, 0, err } if cc.ChainID.Uint64() == 0 { - return nil, 0, fmt.Errorf("wrong chain config") + return nil, 0, errors.New("wrong chain config") } return cc, blockNum, nil } diff --git a/eth/backend.go b/eth/backend.go index 7fb0a7f9d32..3be632bf80d 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -1024,7 +1024,7 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig badBlockHeader, hErr := rawdb.ReadHeaderByHash(tx, config.BadBlockHash) if badBlockHeader != nil { unwindPoint := badBlockHeader.Number.Uint64() - 1 - if err := s.stagedSync.UnwindTo(unwindPoint, stagedsync.BadBlock(config.BadBlockHash, fmt.Errorf("Init unwind")), tx); err != nil { + if err := s.stagedSync.UnwindTo(unwindPoint, stagedsync.BadBlock(config.BadBlockHash, errors.New("Init unwind")), tx); err != nil { return err } } @@ -1110,7 +1110,7 @@ func (s *Ethereum) Etherbase() (eb libcommon.Address, err error) { if etherbase != (libcommon.Address{}) { return etherbase, nil } - return libcommon.Address{}, fmt.Errorf("etherbase must be explicitly specified") + return libcommon.Address{}, errors.New("etherbase must be explicitly specified") } // isLocalBlock checks whether the specified block is mined diff --git a/eth/filters/api.go b/eth/filters/api.go index 3e2383f1ca4..97fadcbbda0 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -418,7 +418,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty api.filtersMu.Unlock() if !found || f.typ != LogsSubscription { - return nil, 
fmt.Errorf("filter not found") + return nil, errors.New("filter not found") } var filter *Filter @@ -477,7 +477,7 @@ func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { } } - return []interface{}{}, fmt.Errorf("filter not found") + return []interface{}{}, errors.New("filter not found") } */ @@ -499,7 +499,7 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { if raw.BlockHash != nil { if raw.FromBlock != nil || raw.ToBlock != nil { // BlockHash is mutually exclusive with FromBlock/ToBlock criteria - return fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") + return errors.New("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") } args.BlockHash = raw.BlockHash } else { @@ -572,11 +572,11 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { } args.Topics[i] = append(args.Topics[i], parsed) } else { - return fmt.Errorf("invalid topic(s)") + return errors.New("invalid topic(s)") } } default: - return fmt.Errorf("invalid topic(s)") + return errors.New("invalid topic(s)") } } } diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go index 7da7a4efc91..9a6a9ecc11c 100644 --- a/eth/stagedsync/exec3.go +++ b/eth/stagedsync/exec3.go @@ -1101,7 +1101,7 @@ func flushAndCheckCommitmentV3(ctx context.Context, header *types.Header, applyT } logger.Error(fmt.Sprintf("[%s] Wrong trie root of block %d: %x, expected (from header): %x. 
Block hash: %x", e.LogPrefix(), header.Number.Uint64(), rh, header.Root.Bytes(), header.Hash())) if cfg.badBlockHalt { - return false, fmt.Errorf("wrong trie root") + return false, errors.New("wrong trie root") } if cfg.hd != nil { cfg.hd.ReportBadHeaderPoS(header.Hash(), header.ParentHash) @@ -1532,11 +1532,11 @@ func reconstituteStep(last bool, return err } - plainStateCollector := etl.NewCollector(fmt.Sprintf("%s recon plainState", s.LogPrefix()), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) + plainStateCollector := etl.NewCollector(s.LogPrefix()+" recon plainState", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) defer plainStateCollector.Close() - codeCollector := etl.NewCollector(fmt.Sprintf("%s recon code", s.LogPrefix()), dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) + codeCollector := etl.NewCollector(s.LogPrefix()+" recon code", dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) defer codeCollector.Close() - plainContractCollector := etl.NewCollector(fmt.Sprintf("%s recon plainContract", s.LogPrefix()), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) + plainContractCollector := etl.NewCollector(s.LogPrefix()+" recon plainContract", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) defer plainContractCollector.Close() var transposedKey []byte @@ -1775,7 +1775,7 @@ func ReconstituteState(ctx context.Context, s *StageState, dirs datadir.Dirs, wo return fmt.Errorf("blockNum for mininmaxTxNum=%d not found. 
See lastBlockNum=%d,lastTxNum=%d", toTxNum, lastBn, lastTn) } if blockNum == 0 { - return fmt.Errorf("not enough transactions in the history data") + return errors.New("not enough transactions in the history data") } blockNum-- txNum, err = rawdbv3.TxNums.Max(tx, blockNum) @@ -1815,11 +1815,11 @@ func ReconstituteState(ctx context.Context, s *StageState, dirs datadir.Dirs, wo } } db.Close() - plainStateCollector := etl.NewCollector(fmt.Sprintf("%s recon plainState", s.LogPrefix()), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) + plainStateCollector := etl.NewCollector(s.LogPrefix()+" recon plainState", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) defer plainStateCollector.Close() - codeCollector := etl.NewCollector(fmt.Sprintf("%s recon code", s.LogPrefix()), dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) + codeCollector := etl.NewCollector(s.LogPrefix()+" recon code", dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) defer codeCollector.Close() - plainContractCollector := etl.NewCollector(fmt.Sprintf("%s recon plainContract", s.LogPrefix()), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) + plainContractCollector := etl.NewCollector(s.LogPrefix()+" recon plainContract", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) defer plainContractCollector.Close() fillWorker := exec3.NewFillWorker(txNum, aggSteps[len(aggSteps)-1]) diff --git a/eth/stagedsync/stage_bor_heimdall.go b/eth/stagedsync/stage_bor_heimdall.go index 5d742060cd6..baa7beaf6da 100644 --- a/eth/stagedsync/stage_bor_heimdall.go +++ b/eth/stagedsync/stage_bor_heimdall.go @@ -704,7 +704,7 @@ func initValidatorSets( } if zeroSpanBytes == nil { - return nil, fmt.Errorf("zero span not found") + return nil, errors.New("zero span not found") } var zeroSpan heimdall.Span diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index a6d69e51322..ead190bd79d 100644 --- 
a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -18,6 +18,7 @@ package stagedsync import ( "context" + "errors" "fmt" "time" @@ -151,7 +152,7 @@ func ExecBlockV3(s *StageState, u Unwinder, txc wrap.TxContainer, toBlock uint64 return nil } -var ErrTooDeepUnwind = fmt.Errorf("too deep unwind") +var ErrTooDeepUnwind = errors.New("too deep unwind") func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, accumulator *shards.Accumulator, logger log.Logger) (err error) { var domains *libstate.SharedDomains diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index c312ddab0d9..d5e8d0bcb4c 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -134,7 +134,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc if cfg.miner.MiningConfig.Etherbase == (libcommon.Address{}) { if cfg.blockBuilderParameters == nil { - return fmt.Errorf("refusing to mine without etherbase") + return errors.New("refusing to mine without etherbase") } // If we do not have an etherbase, let's use the suggested one coinbase = cfg.blockBuilderParameters.SuggestedFeeRecipient diff --git a/eth/stagedsync/stage_trie3.go b/eth/stagedsync/stage_trie3.go index 50d24fbc4c1..6009e6773bc 100644 --- a/eth/stagedsync/stage_trie3.go +++ b/eth/stagedsync/stage_trie3.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/hex" + "errors" "fmt" "sync/atomic" @@ -119,7 +120,7 @@ func collectAndComputeCommitment(ctx context.Context, tx kv.RwTx, tmpDir string, } logger.Info("Committing batch", "processed", fmt.Sprintf("%dM/%dM (%.2f%%)", processed.Load()/1_000_000, totalKeys.Load()/1_000_000, float64(processed.Load())/float64(totalKeys.Load())*100), - "intermediate root", fmt.Sprintf("%x", rh)) + "intermediate root", hex.EncodeToString(rh)) } processed.Add(1) sdCtx.TouchKey(kv.AccountsDomain, string(k), nil) @@ -235,7 
+236,7 @@ func StageHashStateCfg(db kv.RwDB, dirs datadir.Dirs) HashStateCfg { } } -var ErrInvalidStateRootHash = fmt.Errorf("invalid state root hash") +var ErrInvalidStateRootHash = errors.New("invalid state root hash") func RebuildPatriciaTrieBasedOnFiles(rwTx kv.RwTx, cfg TrieCfg, ctx context.Context, logger log.Logger) (libcommon.Hash, error) { useExternalTx := rwTx != nil @@ -299,7 +300,7 @@ func RebuildPatriciaTrieBasedOnFiles(rwTx kv.RwTx, cfg TrieCfg, ctx context.Cont logger.Error(fmt.Sprintf("[RebuildCommitment] Wrong trie root of block %d: %x, expected (from header): %x. Block hash: %x", blockNum, rh, expectedRootHash, headerHash)) rwTx.Rollback() - return trie.EmptyRoot, fmt.Errorf("wrong trie root") + return trie.EmptyRoot, errors.New("wrong trie root") } logger.Info(fmt.Sprintf("[RebuildCommitment] Trie root of block %d txNum %d: %x. Could not verify with block hash because txnum of state is in the middle of the block.", blockNum, toTxNum, rh)) diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go index 7d17ab97271..e3c756adf35 100644 --- a/eth/tracers/js/goja.go +++ b/eth/tracers/js/goja.go @@ -92,7 +92,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes() return b, nil } - return nil, fmt.Errorf("invalid buffer type") + return nil, errors.New("invalid buffer type") } // jsTracer is an implementation of the Tracer interface which evaluates diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index e97cd1a377d..a45476c3e99 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -280,14 +280,14 @@ func FormatLogs(logs []StructLog) []StructLogRes { if trace.Stack != nil { stack := make([]string, len(trace.Stack)) for i, stackValue := range trace.Stack { - stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) + stack[i] = hex.EncodeToString(math.PaddedBigBytes(stackValue, 32)) } formatted[index].Stack = 
&stack } if trace.Memory != nil { memory := make([]string, 0, (len(trace.Memory)+31)/32) for i := 0; i+32 <= len(trace.Memory); i += 32 { - memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) + memory = append(memory, hex.EncodeToString(trace.Memory[i:i+32])) } formatted[index].Memory = &memory } diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index b9d40d91831..52d363f4718 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -19,7 +19,6 @@ package privateapi import ( "context" "errors" - "fmt" "google.golang.org/protobuf/types/known/emptypb" @@ -272,7 +271,7 @@ func (s *EthBackendServer) SubscribeLogs(server remote.ETHBACKEND_SubscribeLogsS if s.logsFilter != nil { return s.logsFilter.subscribeLogs(server) } - return fmt.Errorf("no logs filter available") + return errors.New("no logs filter available") } func (s *EthBackendServer) BorEvent(ctx context.Context, req *remote.BorEventRequest) (*remote.BorEventReply, error) { diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index a042435b40a..55da813f6e6 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -175,7 +175,7 @@ func (s *Service) Stop() error { // until termination. 
func (s *Service) loop() { // Resolve the URL, defaulting to TLS, but falling back to none too - path := fmt.Sprintf("%s/api", s.host) + path := s.host + "/api" urls := []string{path} // url.Parse and url.IsAbs is unsuitable (https://github.com/golang/go/issues/19779) @@ -399,7 +399,7 @@ func (s *Service) login(conn *connWrapper) error { Name: s.node, Node: nodeName, Port: 0, - Network: fmt.Sprintf("%d", s.networkid), + Network: strconv.FormatUint(s.networkid, 10), Protocol: strings.Join(protocols, ", "), API: "No", Os: runtime.GOOS, diff --git a/event/feed_test.go b/event/feed_test.go index 7eba94a0a4d..c5b1147d482 100644 --- a/event/feed_test.go +++ b/event/feed_test.go @@ -20,6 +20,7 @@ package event import ( + "errors" "fmt" "reflect" "sync" @@ -71,7 +72,7 @@ func checkPanic(want error, fn func()) (err error) { defer func() { panic := recover() if panic == nil { - err = fmt.Errorf("didn't panic") + err = errors.New("didn't panic") } else if !reflect.DeepEqual(panic, want) { err = fmt.Errorf("panicked with wrong error: got %q, want %q", panic, want) } diff --git a/migrations/migrations.go b/migrations/migrations.go index f9026692080..88043b63982 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "path/filepath" @@ -65,9 +66,9 @@ type Migration struct { } var ( - ErrMigrationNonUniqueName = fmt.Errorf("please provide unique migration name") - ErrMigrationCommitNotCalled = fmt.Errorf("migration before-commit function was not called") - ErrMigrationETLFilesDeleted = fmt.Errorf( + ErrMigrationNonUniqueName = errors.New("please provide unique migration name") + ErrMigrationCommitNotCalled = errors.New("migration before-commit function was not called") + ErrMigrationETLFilesDeleted = errors.New( "db migration progress was interrupted after extraction step and ETL files was deleted, please contact development team for help or re-sync from scratch", ) ) diff --git 
a/node/node.go b/node/node.go index 3e3d8393f6f..a6c03378760 100644 --- a/node/node.go +++ b/node/node.go @@ -307,7 +307,7 @@ func OpenDatabase(ctx context.Context, config *nodecfg.Config, label kv.Label, n name = "polygon-bridge" case kv.ConsensusDB: if len(name) == 0 { - return nil, fmt.Errorf("expected a consensus name") + return nil, errors.New("expected a consensus name") } default: name = "test" diff --git a/node/rpcstack.go b/node/rpcstack.go index c6ed3e6814f..42954f893c6 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -22,6 +22,7 @@ package node import ( "compress/gzip" "context" + "errors" "fmt" "io" "net" @@ -265,7 +266,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig, allowList rpc. defer h.mu.Unlock() if h.rpcAllowed() { - return fmt.Errorf("JSON-RPC over HTTP is already enabled") + return errors.New("JSON-RPC over HTTP is already enabled") } // Create RPC server and handler. @@ -298,7 +299,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig, allowList rpc.All defer h.mu.Unlock() if h.wsAllowed() { - return fmt.Errorf("JSON-RPC over WebSocket is already enabled") + return errors.New("JSON-RPC over WebSocket is already enabled") } // Create RPC server and handler. 
diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 1bcba52c113..544ec0ef936 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -448,7 +448,7 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { return nil, err } if respN.ID() != n.ID() { - return nil, fmt.Errorf("invalid ID in response record") + return nil, errors.New("invalid ID in response record") } if respN.Seq() < n.Seq() { return n, nil // response record is older diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index b457f2e4707..0ed36b0a3e7 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -441,7 +441,7 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s } } if _, ok := seen[node.ID()]; ok { - return nil, fmt.Errorf("duplicate record") + return nil, errors.New("duplicate record") } seen[node.ID()] = struct{}{} return node, nil @@ -693,11 +693,11 @@ func (t *UDPv5) handleCallResponse(fromID enode.ID, fromAddr *net.UDPAddr, p v5w return false } if !fromAddr.IP.Equal(ac.node.IP()) || fromAddr.Port != ac.node.UDP() { - t.log.Trace(fmt.Sprintf("%s from wrong endpoint", p.Name()), "id", fromID, "addr", fromAddr) + t.log.Trace(p.Name()+" from wrong endpoint", "id", fromID, "addr", fromAddr) return false } if p.Kind() != ac.responseType { - t.log.Trace(fmt.Sprintf("Wrong discv5 response type %s", p.Name()), "id", fromID, "addr", fromAddr) + t.log.Trace("Wrong discv5 response type "+p.Name(), "id", fromID, "addr", fromAddr) return false } t.startResponseTimeout(ac) diff --git a/p2p/discover/v5wire/encoding.go b/p2p/discover/v5wire/encoding.go index 4b4ed628a97..d813ca2a31e 100644 --- a/p2p/discover/v5wire/encoding.go +++ b/p2p/discover/v5wire/encoding.go @@ -355,11 +355,11 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey // key is part of the ID nonce signature. 
var remotePubkey = new(ecdsa.PublicKey) if err := challenge.Node.Load((*enode.Secp256k1)(remotePubkey)); err != nil { - return nil, nil, fmt.Errorf("can't find secp256k1 key for recipient") + return nil, nil, errors.New("can't find secp256k1 key for recipient") } ephkey, err := c.sc.ephemeralKeyGen() if err != nil { - return nil, nil, fmt.Errorf("can't generate ephemeral key") + return nil, nil, errors.New("can't generate ephemeral key") } ephpubkey := EncodePubkey(&ephkey.PublicKey) auth.pubkey = ephpubkey @@ -383,7 +383,7 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey // Create session keys. sec := deriveKeys(sha256.New, ephkey, remotePubkey, c.localnode.ID(), challenge.Node.ID(), cdata) if sec == nil { - return nil, nil, fmt.Errorf("key derivation failed") + return nil, nil, errors.New("key derivation failed") } return auth, sec, err } diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index 850f5004532..aa1452f9e6a 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -200,7 +200,7 @@ func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry, func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry, error) { wantHash, err := b32format.DecodeString(hash) if err != nil { - return nil, fmt.Errorf("invalid base32 hash") + return nil, errors.New("invalid base32 hash") } name := hash + "." 
+ domain txts, err := c.cfg.Resolver.LookupTXT(ctx, hash+"."+domain) diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go index 64105a172e8..c948d25da5b 100644 --- a/p2p/dnsdisc/tree.go +++ b/p2p/dnsdisc/tree.go @@ -24,6 +24,7 @@ import ( "crypto/ecdsa" "encoding/base32" "encoding/base64" + "errors" "fmt" "io" "slices" @@ -311,7 +312,7 @@ func parseLinkEntry(e string) (entry, error) { func parseLink(e string) (*linkEntry, error) { if !strings.HasPrefix(e, linkPrefix) { - return nil, fmt.Errorf("wrong/missing scheme 'enrtree' in URL") + return nil, errors.New("wrong/missing scheme 'enrtree' in URL") } e = e[len(linkPrefix):] keystring, domain, ok := strings.Cut(e, "@") diff --git a/p2p/enode/idscheme.go b/p2p/enode/idscheme.go index f0a3b8f6365..8f69df871d0 100644 --- a/p2p/enode/idscheme.go +++ b/p2p/enode/idscheme.go @@ -21,7 +21,7 @@ package enode import ( "crypto/ecdsa" - "fmt" + "errors" "io" "github.com/erigontech/erigon/crypto" @@ -69,7 +69,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error { if err := r.Load(&entry); err != nil { return err } else if len(entry) != 33 { - return fmt.Errorf("invalid public key") + return errors.New("invalid public key") } h := sha3.NewLegacyKeccak256() diff --git a/p2p/nat/natpmp.go b/p2p/nat/natpmp.go index 4d29a5ddc93..b6239268fc4 100644 --- a/p2p/nat/natpmp.go +++ b/p2p/nat/natpmp.go @@ -20,6 +20,7 @@ package nat import ( + "errors" "fmt" "net" "strings" @@ -51,7 +52,7 @@ func (n *pmp) ExternalIP() (net.IP, error) { func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error { if lifetime <= 0 { - return fmt.Errorf("lifetime must not be <= 0") + return errors.New("lifetime must not be <= 0") } // Note order of port arguments is switched between our // AddMapping and the client's AddPortMapping. 
diff --git a/p2p/node_key_config.go b/p2p/node_key_config.go index acbd401c7fa..9d6190973de 100644 --- a/p2p/node_key_config.go +++ b/p2p/node_key_config.go @@ -18,6 +18,7 @@ package p2p import ( "crypto/ecdsa" + "errors" "fmt" "os" "path" @@ -87,7 +88,7 @@ func (config NodeKeyConfig) LoadOrGenerateAndSave(keyfile string) (*ecdsa.Privat func (config NodeKeyConfig) LoadOrParseOrGenerateAndSave(file, hex, datadir string) (*ecdsa.PrivateKey, error) { switch { case file != "" && hex != "": - return nil, fmt.Errorf("P2P node key is set as both file and hex string - these options are mutually exclusive") + return nil, errors.New("P2P node key is set as both file and hex string - these options are mutually exclusive") case file != "": return config.load(file) case hex != "": diff --git a/p2p/sentry/simulator/sentry_simulator.go b/p2p/sentry/simulator/sentry_simulator.go index ca6947050e0..7baea19df6e 100644 --- a/p2p/sentry/simulator/sentry_simulator.go +++ b/p2p/sentry/simulator/sentry_simulator.go @@ -19,6 +19,7 @@ package simulator import ( "bytes" "context" + "errors" "fmt" "path/filepath" @@ -142,7 +143,7 @@ func (s *server) Close() { } func (s *server) NodeInfo(context.Context, *emptypb.Empty) (*types.NodeInfoReply, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (s *server) PeerById(ctx context.Context, in *isentry.PeerByIdRequest) (*isentry.PeerByIdReply, error) { @@ -151,7 +152,7 @@ func (s *server) PeerById(ctx context.Context, in *isentry.PeerByIdRequest) (*is peer, ok := s.peers[peerId] if !ok { - return nil, fmt.Errorf("unknown peer") + return nil, errors.New("unknown peer") } info := peer.Info() @@ -177,11 +178,11 @@ func (s *server) PeerCount(context.Context, *isentry.PeerCountRequest) (*isentry } func (s *server) PeerEvents(*isentry.PeerEventsRequest, isentry.Sentry_PeerEventsServer) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (s *server) PeerMinBlock(context.Context, 
*isentry.PeerMinBlockRequest) (*emptypb.Empty, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (s *server) Peers(context.Context, *emptypb.Empty) (*isentry.PeersReply, error) { @@ -224,7 +225,7 @@ func (s *server) sendMessageById(ctx context.Context, peerId [64]byte, messageDa peer, ok := s.peers[peerId] if !ok { - return fmt.Errorf("unknown peer") + return errors.New("unknown peer") } switch messageData.Id { diff --git a/p2p/sentry/status_data_provider.go b/p2p/sentry/status_data_provider.go index f8db9e9cab7..0dfb5f03bbc 100644 --- a/p2p/sentry/status_data_provider.go +++ b/p2p/sentry/status_data_provider.go @@ -84,7 +84,7 @@ func uint256FromBigInt(num *big.Int) (*uint256.Int, error) { num256 := new(uint256.Int) overflow := num256.SetFromBig(num) if overflow { - return nil, fmt.Errorf("uint256FromBigInt: big.Int greater than 2^256-1") + return nil, errors.New("uint256FromBigInt: big.Int greater than 2^256-1") } return num256, nil } diff --git a/p2p/server.go b/p2p/server.go index 328e8613f1c..893743053ee 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -26,7 +26,6 @@ import ( "crypto/ecdsa" "encoding/hex" "errors" - "fmt" "net" "sort" "strconv" @@ -653,7 +652,7 @@ func (srv *Server) setupDiscovery(ctx context.Context) error { Unhandled: unhandled, Log: srv.logger, } - ntab, err := discover.ListenV4(ctx, fmt.Sprint(srv.Config.Protocols[0].Version), conn, srv.localnode, cfg) + ntab, err := discover.ListenV4(ctx, strconv.FormatUint(uint64(srv.Config.Protocols[0].Version), 10), conn, srv.localnode, cfg) if err != nil { return err } @@ -669,11 +668,12 @@ func (srv *Server) setupDiscovery(ctx context.Context) error { Bootnodes: srv.BootstrapNodesV5, Log: srv.logger, } + version := uint64(srv.Config.Protocols[0].Version) var err error if sconn != nil { - srv.DiscV5, err = discover.ListenV5(ctx, fmt.Sprint(srv.Config.Protocols[0].Version), sconn, srv.localnode, cfg) + srv.DiscV5, err = discover.ListenV5(ctx, strconv.FormatUint(version, 
10), sconn, srv.localnode, cfg) } else { - srv.DiscV5, err = discover.ListenV5(ctx, fmt.Sprint(srv.Config.Protocols[0].Version), conn, srv.localnode, cfg) + srv.DiscV5, err = discover.ListenV5(ctx, strconv.FormatUint(version, 10), conn, srv.localnode, cfg) } if err != nil { return err @@ -987,13 +987,13 @@ func (srv *Server) checkInboundConn(fd net.Conn, remoteIP net.IP) error { } // Reject connections that do not match NetRestrict. if srv.NetRestrict != nil && !srv.NetRestrict.Contains(remoteIP) { - return fmt.Errorf("not whitelisted in NetRestrict") + return errors.New("not whitelisted in NetRestrict") } // Reject Internet peers that try too often. now := srv.clock.Now() srv.inboundHistory.expire(now, nil) if !netutil.IsLAN(remoteIP) && srv.inboundHistory.contains(remoteIP.String()) { - return fmt.Errorf("too many attempts") + return errors.New("too many attempts") } srv.inboundHistory.add(remoteIP.String(), now.Add(inboundThrottleTime)) return nil diff --git a/p2p/simulations/http.go b/p2p/simulations/http.go index 4c990b85023..07861bcaf3c 100644 --- a/p2p/simulations/http.go +++ b/p2p/simulations/http.go @@ -189,7 +189,7 @@ func (c *Client) CreateNode(config *adapters.NodeConfig) (*p2p.NodeInfo, error) // GetNode returns details of a node func (c *Client) GetNode(nodeID string) (*p2p.NodeInfo, error) { node := &p2p.NodeInfo{} - return node, c.Get(fmt.Sprintf("/nodes/%s", nodeID), node) + return node, c.Get("/nodes/"+nodeID, node) } // StartNode starts a node diff --git a/p2p/transport.go b/p2p/transport.go index 2135a5b51de..51dab6dceb9 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -22,6 +22,7 @@ package p2p import ( "bytes" "crypto/ecdsa" + "errors" "fmt" "io" "net" @@ -168,7 +169,7 @@ func readProtocolHandshake(rw MsgReader) (*protoHandshake, error) { return nil, err } if msg.Size > baseProtocolMaxMsgSize { - return nil, fmt.Errorf("message too big") + return nil, errors.New("message too big") } if msg.Code == discMsg { // Disconnect before protocol 
handshake is valid according to the diff --git a/polygon/bor/bor.go b/polygon/bor/bor.go index 3c8c38742d3..bddda08811e 100644 --- a/polygon/bor/bor.go +++ b/polygon/bor/bor.go @@ -400,19 +400,19 @@ type rwWrapper struct { } func (w rwWrapper) Update(ctx context.Context, f func(tx kv.RwTx) error) error { - return fmt.Errorf("Update not implemented") + return errors.New("Update not implemented") } func (w rwWrapper) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) error { - return fmt.Errorf("UpdateNosync not implemented") + return errors.New("UpdateNosync not implemented") } func (w rwWrapper) BeginRw(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("BeginRw not implemented") + return nil, errors.New("BeginRw not implemented") } func (w rwWrapper) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("BeginRwNosync not implemented") + return nil, errors.New("BeginRwNosync not implemented") } // This is used by the rpcdaemon and tests which need read only access to the provided data services diff --git a/polygon/bor/bor_test.go b/polygon/bor/bor_test.go index cd3956f2027..67de0e27703 100644 --- a/polygon/bor/bor_test.go +++ b/polygon/bor/bor_test.go @@ -19,6 +19,7 @@ package bor_test import ( "context" "encoding/json" + "errors" "fmt" "math/big" "testing" @@ -93,7 +94,7 @@ func (h *test_heimdall) FetchSpan(ctx context.Context, spanID uint64) (*heimdall nextSpan.StartBlock = 1 //256 } else { if spanID != uint64(h.currentSpan.Id+1) { - return nil, fmt.Errorf("Can't initialize span: non consecutive span") + return nil, errors.New("Can't initialize span: non consecutive span") } nextSpan.StartBlock = h.currentSpan.EndBlock + 1 @@ -125,42 +126,42 @@ func (h test_heimdall) currentSprintLength() int { } func (h test_heimdall) FetchCheckpoint(ctx context.Context, number int64) (*heimdall.Checkpoint, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h test_heimdall) FetchCheckpointCount(ctx 
context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h *test_heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*heimdall.Checkpoint, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h test_heimdall) FetchMilestone(ctx context.Context, number int64) (*heimdall.Milestone, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h test_heimdall) FetchMilestoneCount(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h test_heimdall) FetchFirstMilestoneNum(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h test_heimdall) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (h test_heimdall) FetchLastNoAckMilestone(ctx context.Context) (string, error) { - return "", fmt.Errorf("TODO") + return "", errors.New("TODO") } func (h test_heimdall) FetchMilestoneID(ctx context.Context, milestoneID string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (h test_heimdall) FetchLatestSpan(ctx context.Context) (*heimdall.Span, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h test_heimdall) Close() {} diff --git a/polygon/bor/finality/rawdb/milestone.go b/polygon/bor/finality/rawdb/milestone.go index 10e6a4703fc..4c839ff0883 100644 --- a/polygon/bor/finality/rawdb/milestone.go +++ b/polygon/bor/finality/rawdb/milestone.go @@ -195,7 +195,7 @@ func ReadLockField(db kv.RwDB) (bool, uint64, libcommon.Hash, map[string]struct{ } if err = json.Unmarshal(data, &lockField); err != nil { - log.Error(fmt.Sprintf("Unable to unmarshal the lock field in database"), "err", err) + log.Error("Unable to unmarshal the lock field in database", "err", err) return false, 0, libcommon.Hash{}, nil, fmt.Errorf("%w(%v) for 
lock field , data %v(%q)", ErrIncorrectLockField, err, data, string(data)) @@ -254,7 +254,7 @@ func ReadFutureMilestoneList(db kv.RwDB) ([]uint64, map[uint64]libcommon.Hash, e } if err = json.Unmarshal(data, &futureMilestoneField); err != nil { - log.Error(fmt.Sprintf("Unable to unmarshal the future milestone field in database"), "err", err) + log.Error("Unable to unmarshal the future milestone field in database", "err", err) return nil, nil, fmt.Errorf("%w(%v) for future milestone field, data %v(%q)", ErrIncorrectFutureMilestoneField, err, data, string(data)) diff --git a/polygon/bor/finality/whitelist.go b/polygon/bor/finality/whitelist.go index 7dffe9d363d..cbe72189524 100644 --- a/polygon/bor/finality/whitelist.go +++ b/polygon/bor/finality/whitelist.go @@ -162,9 +162,9 @@ func retryHeimdallHandler(fn heimdallHandler, config *config, tickerDuration tim if err != nil { if errors.Is(err, errMissingBlocks) { - config.logger.Debug(fmt.Sprintf("[bor] unable to handle %s", fnName), "err", err) + config.logger.Debug("[bor] unable to handle "+fnName, "err", err) } else { - config.logger.Warn(fmt.Sprintf("[bor] unable to handle %s", fnName), "err", err) + config.logger.Warn("[bor] unable to handle "+fnName, "err", err) } } case <-config.closeCh: diff --git a/polygon/bor/valset/validator_set.go b/polygon/bor/valset/validator_set.go index b36c527a56a..82990b715c9 100644 --- a/polygon/bor/valset/validator_set.go +++ b/polygon/bor/valset/validator_set.go @@ -20,6 +20,7 @@ package valset import ( "bytes" + "errors" "fmt" "math" "math/big" @@ -618,7 +619,7 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes // Check that the resulting set will not be empty. if numNewValidators == 0 && len(vals.Validators) == len(deletes) { - return fmt.Errorf("applying the validator changes would result in empty set") + return errors.New("applying the validator changes would result in empty set") } // Compute the priorities for updates. 
diff --git a/polygon/bridge/log_prefix.go b/polygon/bridge/log_prefix.go index 90e003d0926..e40d4746d13 100644 --- a/polygon/bridge/log_prefix.go +++ b/polygon/bridge/log_prefix.go @@ -16,8 +16,6 @@ package bridge -import "fmt" - func bridgeLogPrefix(message string) string { - return fmt.Sprintf("[bridge] %s", message) + return "[bridge] " + message } diff --git a/polygon/heimdall/checkpoint.go b/polygon/heimdall/checkpoint.go index 9be8e0b4fc7..2bffb8645d7 100644 --- a/polygon/heimdall/checkpoint.go +++ b/polygon/heimdall/checkpoint.go @@ -19,6 +19,7 @@ package heimdall import ( "encoding/binary" "encoding/json" + "errors" "fmt" "math/big" @@ -159,7 +160,7 @@ type CheckpointListResponse struct { Result Checkpoints `json:"result"` } -var ErrCheckpointNotFound = fmt.Errorf("checkpoint not found") +var ErrCheckpointNotFound = errors.New("checkpoint not found") func CheckpointIdAt(tx kv.Tx, block uint64) (CheckpointId, error) { var id uint64 diff --git a/polygon/heimdall/client.go b/polygon/heimdall/client.go index 205a28f8622..7f84698a415 100644 --- a/polygon/heimdall/client.go +++ b/polygon/heimdall/client.go @@ -26,6 +26,7 @@ import ( "net/url" "path" "sort" + "strconv" "strings" "time" @@ -530,7 +531,7 @@ func stateSyncListURL(urlString string, fromID uint64, to int64) (*url.URL, erro } func stateSyncURL(urlString string, id uint64) (*url.URL, error) { - return makeURL(urlString, fmt.Sprintf(fetchStateSyncEvent, fmt.Sprint(id)), "") + return makeURL(urlString, fmt.Sprintf(fetchStateSyncEvent, strconv.FormatUint(id, 10)), "") } func checkpointURL(urlString string, number int64) (*url.URL, error) { @@ -538,7 +539,7 @@ func checkpointURL(urlString string, number int64) (*url.URL, error) { if number == -1 { url = fmt.Sprintf(fetchCheckpoint, "latest") } else { - url = fmt.Sprintf(fetchCheckpoint, fmt.Sprint(number)) + url = fmt.Sprintf(fetchCheckpoint, strconv.FormatInt(number, 10)) } return makeURL(urlString, url, "") diff --git a/polygon/heimdall/entity_fetcher.go 
b/polygon/heimdall/entity_fetcher.go index e8c0061ed5d..954e5577450 100644 --- a/polygon/heimdall/entity_fetcher.go +++ b/polygon/heimdall/entity_fetcher.go @@ -19,7 +19,6 @@ package heimdall import ( "cmp" "context" - "fmt" "slices" "time" @@ -138,7 +137,7 @@ func (f *entityFetcherImpl[TEntity]) FetchAllEntities(ctx context.Context) ([]TE select { case <-progressLogTicker.C: f.logger.Debug( - heimdallLogPrefix(fmt.Sprintf("%s progress", f.name)), + heimdallLogPrefix(f.name+" progress"), "page", page, "len", len(entities), ) @@ -158,7 +157,7 @@ func (f *entityFetcherImpl[TEntity]) FetchAllEntities(ctx context.Context) ([]TE } f.logger.Debug( - heimdallLogPrefix(fmt.Sprintf("%s done", f.name)), + heimdallLogPrefix(f.name+" done"), "len", len(entities), "duration", time.Since(fetchStartTime), ) diff --git a/polygon/heimdall/event_record.go b/polygon/heimdall/event_record.go index 44084fe85e1..7e4a480b48a 100644 --- a/polygon/heimdall/event_record.go +++ b/polygon/heimdall/event_record.go @@ -44,7 +44,7 @@ type EventRecordWithTime struct { Time time.Time `json:"record_time" yaml:"record_time"` } -var ErrEventRecordNotFound = fmt.Errorf("event record not found") +var ErrEventRecordNotFound = errors.New("event record not found") // String returns the string representation of a state record func (e *EventRecordWithTime) String() string { diff --git a/polygon/heimdall/log_prefix.go b/polygon/heimdall/log_prefix.go index 01b98f1a73d..260d2776af3 100644 --- a/polygon/heimdall/log_prefix.go +++ b/polygon/heimdall/log_prefix.go @@ -16,8 +16,6 @@ package heimdall -import "fmt" - func heimdallLogPrefix(message string) string { - return fmt.Sprintf("[bor.heimdall] %s", message) + return "[bor.heimdall] " + message } diff --git a/polygon/heimdall/milestone.go b/polygon/heimdall/milestone.go index 32c12ca41fb..b0381ff9dfd 100644 --- a/polygon/heimdall/milestone.go +++ b/polygon/heimdall/milestone.go @@ -19,6 +19,7 @@ package heimdall import ( "encoding/binary" "encoding/json" + 
"errors" "fmt" "math/big" @@ -169,7 +170,7 @@ type MilestoneIDResponse struct { Result MilestoneID `json:"result"` } -var ErrMilestoneNotFound = fmt.Errorf("milestone not found") +var ErrMilestoneNotFound = errors.New("milestone not found") func MilestoneIdAt(tx kv.Tx, block uint64) (MilestoneId, error) { var id uint64 diff --git a/polygon/p2p/message_listener.go b/polygon/p2p/message_listener.go index 0d928dba223..8440771041a 100644 --- a/polygon/p2p/message_listener.go +++ b/polygon/p2p/message_listener.go @@ -241,5 +241,5 @@ func notifyInboundMessageObservers[TPacket any]( } func messageListenerLogPrefix(message string) string { - return fmt.Sprintf("[p2p.message.listener] %s", message) + return "[p2p.message.listener] " + message } diff --git a/polygon/sync/block_downloader.go b/polygon/sync/block_downloader.go index 456e8dbc00d..34d6479a383 100644 --- a/polygon/sync/block_downloader.go +++ b/polygon/sync/block_downloader.go @@ -202,7 +202,7 @@ func (d *blockDownloader) downloadBlocksUsingWaypoints( "peerCount", len(peers), "maxWorkers", d.maxWorkers, "blk/s", fmt.Sprintf("%.2f", float64(blockCount.Load())/time.Since(fetchStartTime).Seconds()), - "bytes/s", fmt.Sprintf("%s", common.ByteCount(uint64(float64(blocksTotalSize.Load())/time.Since(fetchStartTime).Seconds()))), + "bytes/s", common.ByteCount(uint64(float64(blocksTotalSize.Load())/time.Since(fetchStartTime).Seconds())), ) blockCount.Store(0) diff --git a/polygon/sync/log_prefix.go b/polygon/sync/log_prefix.go index 5cc8fb8e651..81b2e1d939a 100644 --- a/polygon/sync/log_prefix.go +++ b/polygon/sync/log_prefix.go @@ -16,8 +16,6 @@ package sync -import "fmt" - func syncLogPrefix(message string) string { - return fmt.Sprintf("[sync] %s", message) + return "[sync] " + message } diff --git a/rlp/encode.go b/rlp/encode.go index 3a817eefd2c..75c14a9913b 100644 --- a/rlp/encode.go +++ b/rlp/encode.go @@ -233,7 +233,7 @@ const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8 func writeBigInt(i *big.Int, w 
*encBuffer) error { if i.Sign() == -1 { - return fmt.Errorf("rlp: cannot encode negative *big.Int") + return errors.New("rlp: cannot encode negative *big.Int") } bitlen := i.BitLen() if bitlen <= 64 { diff --git a/rpc/handler_test.go b/rpc/handler_test.go index 9dc15c2997f..ea8ea06d1cf 100644 --- a/rpc/handler_test.go +++ b/rpc/handler_test.go @@ -19,7 +19,7 @@ package rpc import ( "bytes" "context" - "fmt" + "errors" "reflect" "testing" @@ -65,10 +65,10 @@ func TestHandlerDoesNotDoubleWriteNull(t *testing.T) { dummyFunc := func(id int, stream *jsoniter.Stream) error { if id == 1 { stream.WriteNil() - return fmt.Errorf("id 1") + return errors.New("id 1") } if id == 2 { - return fmt.Errorf("id 2") + return errors.New("id 2") } if id == 3 { stream.WriteEmptyObject() @@ -79,7 +79,7 @@ func TestHandlerDoesNotDoubleWriteNull(t *testing.T) { stream.WriteObjectField("structLogs") stream.WriteEmptyArray() stream.WriteObjectEnd() - return fmt.Errorf("id 4") + return errors.New("id 4") } return nil } diff --git a/rpc/types.go b/rpc/types.go index f9ff7d0fa4a..fec2d2d7123 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -22,6 +22,7 @@ package rpc import ( "context" "encoding/json" + "errors" "fmt" "math" "math/big" @@ -139,7 +140,7 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { } } if blckNum > math.MaxInt64 { - return fmt.Errorf("block number larger than int64") + return errors.New("block number larger than int64") } *bn = BlockNumber(blckNum) return nil @@ -236,10 +237,10 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { err := json.Unmarshal(data, &e) if err == nil { if e.BlockNumber != nil && e.BlockHash != nil { - return fmt.Errorf("cannot specify both BlockHash and BlockNumber, choose one or the other") + return errors.New("cannot specify both BlockHash and BlockNumber, choose one or the other") } if e.BlockNumber == nil && e.BlockHash == nil { - return fmt.Errorf("at least one of BlockNumber or BlockHash is needed if a dictionary is 
provided") + return errors.New("at least one of BlockNumber or BlockHash is needed if a dictionary is provided") } bnh.BlockNumber = e.BlockNumber bnh.BlockHash = e.BlockHash @@ -250,7 +251,7 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { blckNum, err := strconv.ParseUint(string(data), 10, 64) if err == nil { if blckNum > math.MaxInt64 { - return fmt.Errorf("blocknumber too high") + return errors.New("blocknumber too high") } bn := BlockNumber(blckNum) bnh.BlockNumber = &bn @@ -295,7 +296,7 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { return err } if blckNum > math.MaxInt64 { - return fmt.Errorf("blocknumber too high") + return errors.New("blocknumber too high") } bn := BlockNumber(blckNum) bnh.BlockNumber = &bn diff --git a/tests/block_test_util.go b/tests/block_test_util.go index ae056b86913..04fb61ca684 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -25,6 +25,7 @@ import ( "context" "encoding/hex" "encoding/json" + "errors" "fmt" "math/big" "reflect" @@ -244,10 +245,10 @@ func (bt *BlockTest) insertBlocks(m *mock.MockSentry) ([]btBlock, error) { func validateHeader(h *btHeader, h2 *types.Header) error { if h == nil { - return fmt.Errorf("validateHeader: h == nil") + return errors.New("validateHeader: h == nil") } if h2 == nil { - return fmt.Errorf("validateHeader: h2 == nil") + return errors.New("validateHeader: h2 == nil") } if h.Bloom != h2.Bloom { return fmt.Errorf("bloom: want: %x have: %x", h.Bloom, h2.Bloom) diff --git a/tests/init_test.go b/tests/init_test.go index f919d195397..441e223e944 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -21,6 +21,7 @@ package tests import ( "encoding/json" + "errors" "fmt" "io" "os" @@ -185,7 +186,7 @@ func (tm *testMatcher) checkFailureWithName(t *testing.T, name string, err error t.Logf("error: %v", err) return nil } - return fmt.Errorf("test succeeded unexpectedly") + return errors.New("test succeeded unexpectedly") } return err } diff 
--git a/tests/rlp_test_util.go b/tests/rlp_test_util.go index e28094c3b04..7af1af689a9 100644 --- a/tests/rlp_test_util.go +++ b/tests/rlp_test_util.go @@ -62,7 +62,7 @@ func FromHex(s string) ([]byte, error) { func (t *RLPTest) Run() error { outb, err := FromHex(t.Out) if err != nil { - return fmt.Errorf("invalid hex in Out") + return errors.New("invalid hex in Out") } // Handle simple decoding tests with no actual In value. @@ -90,7 +90,7 @@ func checkDecodeInterface(b []byte, isValid bool) error { case isValid && err != nil: return fmt.Errorf("decoding failed: %w", err) case !isValid && err == nil: - return fmt.Errorf("decoding of invalid value succeeded") + return errors.New("decoding of invalid value succeeded") } return nil } diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 5b8079692f9..0e3a1122dab 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -24,6 +24,7 @@ import ( "encoding/binary" "encoding/hex" "encoding/json" + "errors" "fmt" "math/big" "strconv" @@ -438,7 +439,7 @@ func toMessage(tx stTransaction, ps stPostState, baseFee *big.Int) (core.Message gasPrice = math.NewHexOrDecimal256(gp.Int64()) } if gasPrice == nil { - return nil, fmt.Errorf("no gas price provided") + return nil, errors.New("no gas price provided") } gpi := big.Int(*gasPrice) diff --git a/turbo/adapter/ethapi/api.go b/turbo/adapter/ethapi/api.go index 7073ff39e67..1ed331b3d0f 100644 --- a/turbo/adapter/ethapi/api.go +++ b/turbo/adapter/ethapi/api.go @@ -20,6 +20,7 @@ package ethapi import ( + "encoding/hex" "errors" "fmt" "math/big" @@ -98,7 +99,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type if args.GasPrice != nil { overflow := gasPrice.SetFromBig(args.GasPrice.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } gasFeeCap, gasTipCap = gasPrice, gasPrice @@ -109,7 +110,7 @@ func 
(args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type gasPrice = new(uint256.Int) overflow := gasPrice.SetFromBig(args.GasPrice.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } gasFeeCap, gasTipCap = gasPrice, gasPrice } else { @@ -118,14 +119,14 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type if args.MaxFeePerGas != nil { overflow := gasFeeCap.SetFromBig(args.MaxFeePerGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } gasTipCap = new(uint256.Int) if args.MaxPriorityFeePerGas != nil { overflow := gasTipCap.SetFromBig(args.MaxPriorityFeePerGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes @@ -137,7 +138,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type if args.MaxFeePerBlobGas != nil { blobFee, overflow := uint256.FromBig(args.MaxFeePerBlobGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.MaxFeePerBlobGas higher than 2^256-1") + return types.Message{}, errors.New("args.MaxFeePerBlobGas higher than 2^256-1") } maxFeePerBlobGas = blobFee } @@ -147,7 +148,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type if args.Value != nil { overflow := value.SetFromBig(args.Value.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.Value higher than 2^256-1") + return types.Message{}, errors.New("args.Value higher than 2^256-1") } } var data []byte @@ -248,14 +249,14 @@ func FormatLogs(logs []logger.StructLog) []StructLogRes { if trace.Stack != nil { stack := make([]string, 
len(trace.Stack)) for i, stackValue := range trace.Stack { - stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) + stack[i] = hex.EncodeToString(math.PaddedBigBytes(stackValue, 32)) } formatted[index].Stack = &stack } if trace.Memory != nil { memory := make([]string, 0, (len(trace.Memory)+31)/32) for i := 0; i+32 <= len(trace.Memory); i += 32 { - memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) + memory = append(memory, hex.EncodeToString(trace.Memory[i:i+32])) } formatted[index].Memory = &memory } diff --git a/turbo/adapter/ethapi/state_overrides.go b/turbo/adapter/ethapi/state_overrides.go index b887607a134..77f782b6183 100644 --- a/turbo/adapter/ethapi/state_overrides.go +++ b/turbo/adapter/ethapi/state_overrides.go @@ -17,6 +17,7 @@ package ethapi import ( + "errors" "fmt" "math/big" @@ -45,7 +46,7 @@ func (overrides *StateOverrides) Override(state *state.IntraBlockState) error { if account.Balance != nil { balance, overflow := uint256.FromBig((*big.Int)(*account.Balance)) if overflow { - return fmt.Errorf("account.Balance higher than 2^256-1") + return errors.New("account.Balance higher than 2^256-1") } state.SetBalance(addr, balance, tracing.BalanceChangeUnspecified) } diff --git a/turbo/app/import_cmd.go b/turbo/app/import_cmd.go index 453cabf8727..b9549189c67 100644 --- a/turbo/app/import_cmd.go +++ b/turbo/app/import_cmd.go @@ -150,7 +150,7 @@ func ImportChain(ethereum *eth.Ethereum, chainDB kv.RwDB, fn string, logger log. for batch := 0; ; batch++ { // Load a batch of RLP blocks. if checkInterrupt() { - return fmt.Errorf("interrupted") + return errors.New("interrupted") } i := 0 for ; i < importBatchSize; i++ { @@ -173,7 +173,7 @@ func ImportChain(ethereum *eth.Ethereum, chainDB kv.RwDB, fn string, logger log. } // Import the batch. 
if checkInterrupt() { - return fmt.Errorf("interrupted") + return errors.New("interrupted") } br, _ := ethereum.BlockIO() diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go index d06d4ec7243..4314f54c5c2 100644 --- a/turbo/app/snapshots_cmd.go +++ b/turbo/app/snapshots_cmd.go @@ -585,7 +585,7 @@ func doDecompressSpeed(cliCtx *cli.Context) error { } args := cliCtx.Args() if args.Len() < 1 { - return fmt.Errorf("expecting file path as a first argument") + return errors.New("expecting file path as a first argument") } f := args.First() @@ -758,7 +758,7 @@ func doUncompress(cliCtx *cli.Context) error { args := cliCtx.Args() if args.Len() < 1 { - return fmt.Errorf("expecting file path as a first argument") + return errors.New("expecting file path as a first argument") } f := args.First() @@ -811,7 +811,7 @@ func doCompress(cliCtx *cli.Context) error { args := cliCtx.Args() if args.Len() < 1 { - return fmt.Errorf("expecting file path as a first argument") + return errors.New("expecting file path as a first argument") } f := args.First() dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 66c8413bf23..8ca78ab237f 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -274,7 +274,7 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log. 
} // Sanitize prune flag if ctx.String(PruneModeFlag.Name) != "archive" && (ctx.IsSet(PruneBlocksDistanceFlag.Name) || ctx.IsSet(PruneDistanceFlag.Name)) { - utils.Fatalf(fmt.Sprintf("error: --prune.distance and --prune.distance.blocks are only allowed with --prune.mode=archive")) + utils.Fatalf("error: --prune.distance and --prune.distance.blocks are only allowed with --prune.mode=archive") } distance := ctx.Uint64(PruneDistanceFlag.Name) blockDistance := ctx.Uint64(PruneBlocksDistanceFlag.Name) @@ -399,7 +399,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { chainId := cfg.NetworkID if *pruneMode != "archive" && (pruneBlockDistance != nil || pruneDistance != nil) { - utils.Fatalf(fmt.Sprintf("error: --prune.distance and --prune.distance.blocks are only allowed with --prune.mode=archive")) + utils.Fatalf("error: --prune.distance and --prune.distance.blocks are only allowed with --prune.mode=archive") } var distance, blockDistance uint64 = math.MaxUint64, math.MaxUint64 if pruneBlockDistance != nil { diff --git a/turbo/cmdtest/test_cmd.go b/turbo/cmdtest/test_cmd.go index 62e1205c297..cb3adc07b0b 100644 --- a/turbo/cmdtest/test_cmd.go +++ b/turbo/cmdtest/test_cmd.go @@ -27,6 +27,7 @@ import ( "os" "os/exec" "regexp" + "strconv" "strings" "sync" "sync/atomic" @@ -66,7 +67,7 @@ var id int32 // reexec init function for that name (e.g. 
"geth-test" in cmd/geth/run_test.go) func (tt *TestCmd) Run(name string, args ...string) { id := atomic.AddInt32(&id, 1) - tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} + tt.stderr = &testlogger{t: tt.T, name: strconv.FormatUint(uint64(id), 10)} tt.cmd = &exec.Cmd{ Path: reexec.Self(), Args: append([]string{name}, args...), diff --git a/turbo/engineapi/engine_server.go b/turbo/engineapi/engine_server.go index bac99bb68eb..7d9d9d1853d 100644 --- a/turbo/engineapi/engine_server.go +++ b/turbo/engineapi/engine_server.go @@ -340,11 +340,11 @@ func (s *EngineServer) getQuickPayloadStatusIfPossible(ctx context.Context, bloc } if s.config.TerminalTotalDifficulty == nil { s.logger.Error(fmt.Sprintf("[%s] not a proof-of-stake chain", prefix)) - return nil, fmt.Errorf("not a proof-of-stake chain") + return nil, errors.New("not a proof-of-stake chain") } if s.hd == nil { - return nil, fmt.Errorf("headerdownload is nil") + return nil, errors.New("headerdownload is nil") } headHash, finalizedHash, safeHash, err := s.chainRW.GetForkChoice(ctx) @@ -453,11 +453,11 @@ func (s *EngineServer) getPayload(ctx context.Context, payloadId uint64, version return nil, errCaplinEnabled } if !s.proposing { - return nil, fmt.Errorf("execution layer not running as a proposer. enable proposer by taking out the --proposer.disable flag on startup") + return nil, errors.New("execution layer not running as a proposer. enable proposer by taking out the --proposer.disable flag on startup") } if s.config.TerminalTotalDifficulty == nil { - return nil, fmt.Errorf("not a proof-of-stake chain") + return nil, errors.New("not a proof-of-stake chain") } s.logger.Debug("[GetPayload] acquiring lock") @@ -554,7 +554,7 @@ func (s *EngineServer) forkchoiceUpdated(ctx context.Context, forkchoiceState *e } if !s.proposing { - return nil, fmt.Errorf("execution layer not running as a proposer. 
enable proposer by taking out the --proposer.disable flag on startup") + return nil, errors.New("execution layer not running as a proposer. enable proposer by taking out the --proposer.disable flag on startup") } headHeader := s.chainRW.GetHeaderByHash(ctx, forkchoiceState.HeadHash) diff --git a/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go b/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go index 43104b7fc1f..e0730a84da8 100644 --- a/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go +++ b/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go @@ -18,6 +18,7 @@ package eth1_chain_reader import ( "context" + "errors" "fmt" "math/big" "time" @@ -408,7 +409,7 @@ func (c ChainReaderWriterEth1) AssembleBlock(baseHash libcommon.Hash, attributes return 0, err } if resp.Busy { - return 0, fmt.Errorf("execution data is still syncing") + return 0, errors.New("execution data is still syncing") } return resp.Id, nil } @@ -421,7 +422,7 @@ func (c ChainReaderWriterEth1) GetAssembledBlock(id uint64) (*cltypes.Eth1Block, return nil, nil, nil, err } if resp.Busy { - return nil, nil, nil, fmt.Errorf("execution data is still syncing") + return nil, nil, nil, errors.New("execution data is still syncing") } if resp.Data == nil { return nil, nil, nil, nil diff --git a/turbo/execution/eth1/getters.go b/turbo/execution/eth1/getters.go index a0e5964c2f8..e9865424f2d 100644 --- a/turbo/execution/eth1/getters.go +++ b/turbo/execution/eth1/getters.go @@ -292,7 +292,7 @@ func (e *EthereumExecutionModule) CurrentHeader(ctx context.Context, _ *emptypb. 
return nil, fmt.Errorf("ethereumExecutionModule.CurrentHeader: blockReader.Header error %w", err) } if h == nil { - return nil, fmt.Errorf("ethereumExecutionModule.CurrentHeader: no current header yet - probabably node not synced yet") + return nil, errors.New("ethereumExecutionModule.CurrentHeader: no current header yet - probabably node not synced yet") } return &execution.GetHeaderResponse{ Header: eth1_utils.HeaderToHeaderRPC(h), diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index ee2ee32d5b3..b17d09bfd78 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" jsoniter "github.com/json-iterator/go" @@ -89,7 +90,7 @@ func (api *PrivateDebugAPIImpl) StorageRangeAt(ctx context.Context, blockHash co number := rawdb.ReadHeaderNumber(tx, blockHash) if number == nil { - return StorageRangeResult{}, fmt.Errorf("block not found") + return StorageRangeResult{}, errors.New("block not found") } minTxNum, err := rawdbv3.TxNums.Min(tx, *number) if err != nil { @@ -110,7 +111,7 @@ func (api *PrivateDebugAPIImpl) AccountRange(ctx context.Context, blockNrOrHash if number, ok := blockNrOrHash.Number(); ok { if number == rpc.PendingBlockNumber { - return state.IteratorDump{}, fmt.Errorf("accountRange for pending block not supported") + return state.IteratorDump{}, errors.New("accountRange for pending block not supported") } if number == rpc.LatestBlockNumber { var err error @@ -302,7 +303,7 @@ func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common. 
canonicalHash, _ := api._blockReader.CanonicalHash(ctx, tx, *number) isCanonical := canonicalHash == blockHash if !isCanonical { - return nil, fmt.Errorf("block hash is not canonical") + return nil, errors.New("block hash is not canonical") } minTxNum, err := rawdbv3.TxNums.Min(tx, *number) @@ -357,7 +358,7 @@ func (api *PrivateDebugAPIImpl) GetRawHeader(ctx context.Context, blockNrOrHash return nil, err } if header == nil { - return nil, fmt.Errorf("header not found") + return nil, errors.New("header not found") } return rlp.EncodeToBytes(header) } @@ -377,7 +378,7 @@ func (api *PrivateDebugAPIImpl) GetRawBlock(ctx context.Context, blockNrOrHash r return nil, err } if block == nil { - return nil, fmt.Errorf("block not found") + return nil, errors.New("block not found") } return rlp.EncodeToBytes(block) } diff --git a/turbo/jsonrpc/erigon_receipts.go b/turbo/jsonrpc/erigon_receipts.go index 51bd791d345..7006c952ea5 100644 --- a/turbo/jsonrpc/erigon_receipts.go +++ b/turbo/jsonrpc/erigon_receipts.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "github.com/RoaringBitmap/roaring" @@ -130,7 +131,7 @@ func (api *ErigonImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) // {{A}} matches topic A in any positions. 
Logs with {{B}, {A}} will be matched func (api *ErigonImpl) GetLatestLogs(ctx context.Context, crit filters.FilterCriteria, logOptions filters.LogFilterOptions) (types.ErigonLogs, error) { if logOptions.LogCount != 0 && logOptions.BlockCount != 0 { - return nil, fmt.Errorf("logs count & block count are ambigious") + return nil, errors.New("logs count & block count are ambigious") } if logOptions.LogCount == 0 && logOptions.BlockCount == 0 { logOptions = filters.DefaultLogFilterOptions() diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go index 2fcbf8de72c..33b24943ecd 100644 --- a/turbo/jsonrpc/eth_api.go +++ b/turbo/jsonrpc/eth_api.go @@ -19,7 +19,7 @@ package jsonrpc import ( "bytes" "context" - "fmt" + "errors" "math/big" "sync" "sync/atomic" @@ -261,7 +261,7 @@ func (api *BaseAPI) chainConfigWithGenesis(ctx context.Context, tx kv.Tx) (*chai return nil, nil, err } if genesisBlock == nil { - return nil, nil, fmt.Errorf("genesis block not found in database") + return nil, nil, errors.New("genesis block not found in database") } cc, err = rawdb.ReadChainConfig(tx, genesisBlock.Hash()) if err != nil { @@ -320,7 +320,7 @@ func (api *BaseAPI) checkPruneHistory(tx kv.Tx, block uint64) error { } prunedTo := p.History.PruneTo(latest) if block < prunedTo { - return fmt.Errorf("history has been pruned for this block") + return errors.New("history has been pruned for this block") } } diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index e638eae19b8..9fab1333d3c 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -202,7 +202,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs stateReader := rpchelper.CreateLatestCachedStateReader(cacheView, dbtx) state := state.New(stateReader) if state == nil { - return 0, fmt.Errorf("can't get the current state") + return 0, errors.New("can't get the current state") } balance := state.GetBalance(*args.From) // from can't be nil @@ -254,7 +254,7 @@ func 
(api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs } } if block == nil { - return 0, fmt.Errorf("could not find latest block in cache or db") + return 0, errors.New("could not find latest block in cache or db") } stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, latestCanBlockNumber, isLatest, 0, api.stateCache, chainConfig.ChainName) @@ -332,7 +332,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs // GetProof is partially implemented; no Storage proofs, and proofs must be for // blocks within maxGetProofRewindBlockCount blocks of the head. func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, storageKeys []libcommon.Hash, blockNrOrHash rpc.BlockNumberOrHash) (*accounts.AccProofResult, error) { - return nil, fmt.Errorf("not supported by Erigon3") + return nil, errors.New("not supported by Erigon3") /* tx, err := api.db.BeginRo(ctx) if err != nil { diff --git a/turbo/jsonrpc/eth_callMany.go b/turbo/jsonrpc/eth_callMany.go index 9b2bab097ec..8e36057e34c 100644 --- a/turbo/jsonrpc/eth_callMany.go +++ b/turbo/jsonrpc/eth_callMany.go @@ -19,6 +19,7 @@ package jsonrpc import ( "context" "encoding/hex" + "errors" "fmt" "math/big" "time" @@ -108,7 +109,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont return nil, err } if len(bundles) == 0 { - return nil, fmt.Errorf("empty bundles") + return nil, errors.New("empty bundles") } empty := true for _, bundle := range bundles { @@ -118,7 +119,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont } if empty { - return nil, fmt.Errorf("empty bundles") + return nil, errors.New("empty bundles") } defer func(start time.Time) { log.Trace("Executing EVM callMany finished", "runtime", time.Since(start)) }(time.Now()) diff --git a/turbo/jsonrpc/overlay_api.go b/turbo/jsonrpc/overlay_api.go index e48579170cb..baf85f5e5c6 100644 --- a/turbo/jsonrpc/overlay_api.go +++ 
b/turbo/jsonrpc/overlay_api.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "runtime" "sync" @@ -120,7 +121,7 @@ func (api *OverlayAPIImpl) CallConstructor(ctx context.Context, address common.A } if !ok { - return nil, fmt.Errorf("contract construction txn not found") + return nil, errors.New("contract construction txn not found") } err = api.BaseAPI.checkPruneHistory(tx, blockNum) @@ -144,7 +145,7 @@ func (api *OverlayAPIImpl) CallConstructor(ctx context.Context, address common.A } if transactionIndex == -1 { - return nil, fmt.Errorf("could not find txn hash") + return nil, errors.New("could not find txn hash") } replayTransactions = block.Transactions()[:transactionIndex] diff --git a/turbo/jsonrpc/parity_api.go b/turbo/jsonrpc/parity_api.go index c47c1be644a..6ade56eed38 100644 --- a/turbo/jsonrpc/parity_api.go +++ b/turbo/jsonrpc/parity_api.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -70,7 +71,7 @@ func (api *ParityAPIImpl) ListStorageKeys(ctx context.Context, account libcommon if err != nil { return nil, err } else if a == nil { - return nil, fmt.Errorf("acc not found") + return nil, errors.New("acc not found") } bn := rawdb.ReadCurrentBlockNumber(tx) diff --git a/turbo/jsonrpc/parity_api_test.go b/turbo/jsonrpc/parity_api_test.go index 721dcb80803..03c296a0716 100644 --- a/turbo/jsonrpc/parity_api_test.go +++ b/turbo/jsonrpc/parity_api_test.go @@ -18,7 +18,7 @@ package jsonrpc import ( "context" - "fmt" + "errors" "testing" "github.com/erigontech/erigon/rpc/rpccfg" @@ -122,5 +122,5 @@ func TestParityAPIImpl_ListStorageKeys_AccNotFound(t *testing.T) { api := NewParityAPIImpl(newBaseApiForTest(m), m.DB) addr := libcommon.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcaef") _, err := api.ListStorageKeys(context.Background(), addr, 2, nil, latestBlock) - assert.Error(err, fmt.Errorf("acc not found")) + assert.Error(err, errors.New("acc not 
found")) } diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go index 57952a31795..76326f64c60 100644 --- a/turbo/jsonrpc/trace_adhoc.go +++ b/turbo/jsonrpc/trace_adhoc.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "math" "strings" @@ -178,7 +179,7 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) if args.GasPrice != nil { overflow := gasPrice.SetFromBig(args.GasPrice.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } gasFeeCap, gasTipCap = gasPrice, gasPrice @@ -189,7 +190,7 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) // User specified the legacy gas field, convert to 1559 gas typing gasPrice, overflow = uint256.FromBig(args.GasPrice.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } gasFeeCap, gasTipCap = gasPrice, gasPrice } else { @@ -198,14 +199,14 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) if args.MaxFeePerGas != nil { overflow := gasFeeCap.SetFromBig(args.MaxFeePerGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } gasTipCap = new(uint256.Int) if args.MaxPriorityFeePerGas != nil { overflow := gasTipCap.SetFromBig(args.MaxPriorityFeePerGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes @@ -226,7 +227,7 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) if args.Value != nil { overflow := 
value.SetFromBig(args.Value.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.Value higher than 2^256-1") + return types.Message{}, errors.New("args.Value higher than 2^256-1") } } var data []byte @@ -1007,7 +1008,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp var overflow bool baseFee, overflow = uint256.FromBig(header.BaseFee) if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") + return nil, errors.New("header.BaseFee uint256 overflow") } } msg, err := args.ToMessage(api.gasCap, baseFee) @@ -1073,7 +1074,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return nil, err } if tok != json.Delim('[') { - return nil, fmt.Errorf("expected array of [callparam, tracetypes]") + return nil, errors.New("expected array of [callparam, tracetypes]") } for dec.More() { tok, err = dec.Token() @@ -1081,7 +1082,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return nil, err } if tok != json.Delim('[') { - return nil, fmt.Errorf("expected [callparam, tracetypes]") + return nil, errors.New("expected [callparam, tracetypes]") } callParams = append(callParams, TraceCallParam{}) args := &callParams[len(callParams)-1] @@ -1096,7 +1097,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return nil, err } if tok != json.Delim(']') { - return nil, fmt.Errorf("expected end of [callparam, tracetypes]") + return nil, errors.New("expected end of [callparam, tracetypes]") } } tok, err = dec.Token() @@ -1104,7 +1105,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return nil, err } if tok != json.Delim(']') { - return nil, fmt.Errorf("expected end of array of [callparam, tracetypes]") + return nil, errors.New("expected end of array of [callparam, tracetypes]") } var baseFee *uint256.Int if parentNrOrHash == nil { @@ -1129,7 +1130,7 @@ func (api *TraceAPIImpl) CallMany(ctx 
context.Context, calls json.RawMessage, pa var overflow bool baseFee, overflow = uint256.FromBig(parentHeader.BaseFee) if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") + return nil, errors.New("header.BaseFee uint256 overflow") } } msgs := make([]types.Message, len(callParams)) diff --git a/turbo/jsonrpc/trace_filtering.go b/turbo/jsonrpc/trace_filtering.go index c60674140ac..a93414c23f6 100644 --- a/turbo/jsonrpc/trace_filtering.go +++ b/turbo/jsonrpc/trace_filtering.go @@ -325,7 +325,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, gas toBlock = uint64(*req.ToBlock) } if fromBlock > toBlock { - return fmt.Errorf("invalid parameters: fromBlock cannot be greater than toBlock") + return errors.New("invalid parameters: fromBlock cannot be greater than toBlock") } return api.filterV3(ctx, dbtx.(kv.TemporalTx), fromBlock, toBlock, req, stream, *gasBailOut, traceConfig) diff --git a/turbo/jsonrpc/tracing.go b/turbo/jsonrpc/tracing.go index 29011399f36..aa6f5256b73 100644 --- a/turbo/jsonrpc/tracing.go +++ b/turbo/jsonrpc/tracing.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "time" @@ -370,7 +371,7 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA var overflow bool baseFee, overflow = uint256.FromBig(header.BaseFee) if overflow { - return fmt.Errorf("header.BaseFee uint256 overflow") + return errors.New("header.BaseFee uint256 overflow") } } msg, err := args.ToMessage(api.GasCap, baseFee) @@ -412,7 +413,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun } if len(bundles) == 0 { stream.WriteNil() - return fmt.Errorf("empty bundles") + return errors.New("empty bundles") } empty := true for _, bundle := range bundles { @@ -423,7 +424,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun if empty { stream.WriteNil() - return fmt.Errorf("empty bundles") + return errors.New("empty bundles") } 
defer func(start time.Time) { log.Trace("Tracing CallMany finished", "runtime", time.Since(start)) }(time.Now()) diff --git a/turbo/jsonrpc/txpool_api.go b/turbo/jsonrpc/txpool_api.go index b0e47f54835..cb2ac72bed0 100644 --- a/turbo/jsonrpc/txpool_api.go +++ b/turbo/jsonrpc/txpool_api.go @@ -19,6 +19,7 @@ package jsonrpc import ( "context" "fmt" + "strconv" "github.com/erigontech/erigon-lib/common/hexutil" @@ -111,7 +112,7 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma for account, txs := range pending { dump := make(map[string]*RPCTransaction) for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["pending"][account.Hex()] = dump } @@ -119,7 +120,7 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma for account, txs := range baseFee { dump := make(map[string]*RPCTransaction) for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["baseFee"][account.Hex()] = dump } @@ -127,7 +128,7 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma for account, txs := range queued { dump := make(map[string]*RPCTransaction) for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["queued"][account.Hex()] = dump } @@ -186,19 +187,19 @@ func (api *TxPoolAPIImpl) ContentFrom(ctx context.Context, addr libcommon.Addres // Flatten the pending transactions dump := make(map[string]*RPCTransaction) for _, txn := range pending { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, 
curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["pending"] = dump // Flatten the baseFee transactions dump = make(map[string]*RPCTransaction) for _, txn := range baseFee { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["baseFee"] = dump // Flatten the queued transactions dump = make(map[string]*RPCTransaction) for _, txn := range queued { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["queued"] = dump return content, nil diff --git a/turbo/rpchelper/filters.go b/turbo/rpchelper/filters.go index 20a17da7965..b6896fbad7d 100644 --- a/turbo/rpchelper/filters.go +++ b/turbo/rpchelper/filters.go @@ -574,7 +574,7 @@ func (ff *Filters) onNewEvent(event *remote.SubscribeReply) error { case remote.Event_PENDING_BLOCK: return ff.onPendingBlock(event) default: - return fmt.Errorf("unsupported event type") + return errors.New("unsupported event type") } } diff --git a/turbo/snapshotsync/freezeblocks/block_reader.go b/turbo/snapshotsync/freezeblocks/block_reader.go index ad3a4a41ff0..277e000cc48 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader.go +++ b/turbo/snapshotsync/freezeblocks/block_reader.go @@ -254,7 +254,7 @@ func (r *RemoteBlockReader) BodyRlp(ctx context.Context, tx kv.Getter, hash comm } func (r *RemoteBlockReader) LastEventId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBlockReader) EventLookup(ctx context.Context, tx kv.Getter, txnHash common.Hash) (uint64, bool, error) { @@ -301,7 +301,7 @@ func (r *RemoteBlockReader) LastFrozenSpanId() uint64 { } func (r *RemoteBlockReader) 
LastMilestoneId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBlockReader) Milestone(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { @@ -309,7 +309,7 @@ func (r *RemoteBlockReader) Milestone(ctx context.Context, tx kv.Getter, spanId } func (r *RemoteBlockReader) LastCheckpointId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBlockReader) Checkpoint(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index c397d9267b0..dfc505091c8 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -728,13 +728,13 @@ func (s *RoSnapshots) buildMissedIndicesIfNeed(ctx context.Context, logPrefix st return nil } if !s.Cfg().ProduceE2 && s.IndicesMax() == 0 { - return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") + return errors.New("please remove --snap.stop, erigon can't work without creating basic indices") } if !s.Cfg().ProduceE2 { return nil } if !s.SegmentsReady() { - return fmt.Errorf("not all snapshot segments are available") + return errors.New("not all snapshot segments are available") } s.LogStat("missed-idx") @@ -944,7 +944,7 @@ func (s *RoSnapshots) AddSnapshotsToSilkworm(silkwormInstance *silkworm.Silkworm } if len(mappedHeaderSnapshots) != len(mappedBodySnapshots) || len(mappedBodySnapshots) != len(mappedTxnSnapshots) { - return fmt.Errorf("addSnapshots: the number of headers/bodies/txs snapshots must be the same") + return errors.New("addSnapshots: the number of headers/bodies/txs snapshots must be the same") } for i := 0; i < len(mappedHeaderSnapshots); i++ { diff 
--git a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go index edf460fb36d..bb5bd7d8c08 100644 --- a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go @@ -56,7 +56,7 @@ func BeaconSimpleIdx(ctx context.Context, sn snaptype.FileInfo, salt uint32, tmp num := make([]byte, binary.MaxVarintLen64) if err := snaptype.BuildIndex(ctx, sn, salt, sn.From, tmpDir, log.LvlDebug, p, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { if i%20_000 == 0 { - logger.Log(lvl, fmt.Sprintf("Generating idx for %s", sn.Type.Name()), "progress", i) + logger.Log(lvl, "Generating idx for "+sn.Type.Name(), "progress", i) } p.Processed.Add(1) n := binary.PutUvarint(num, i) @@ -665,7 +665,7 @@ func (s *CaplinSnapshots) ReadBlobSidecars(slot uint64) ([]*cltypes.BlobSidecar, return nil, nil } if len(buf)%sidecarSSZSize != 0 { - return nil, fmt.Errorf("invalid sidecar list length") + return nil, errors.New("invalid sidecar list length") } sidecars := make([]*cltypes.BlobSidecar, len(buf)/sidecarSSZSize) for i := 0; i < len(buf); i += sidecarSSZSize { diff --git a/turbo/stages/bodydownload/body_algos.go b/turbo/stages/bodydownload/body_algos.go index 61bcd0d7d24..b5f8d068f54 100644 --- a/turbo/stages/bodydownload/body_algos.go +++ b/turbo/stages/bodydownload/body_algos.go @@ -19,6 +19,7 @@ package bodydownload import ( "bytes" "context" + "errors" "fmt" "math/big" @@ -75,7 +76,7 @@ func (bd *BodyDownload) UpdateFromDb(db kv.Tx) (headHeight, headTime uint64, hea headTd256 = new(uint256.Int) overflow := headTd256.SetFromBig(headTd) if overflow { - return 0, 0, libcommon.Hash{}, nil, fmt.Errorf("headTd higher than 2^256-1") + return 0, 0, libcommon.Hash{}, nil, errors.New("headTd higher than 2^256-1") } headTime = 0 headHeader, err := bd.br.Header(context.Background(), db, headHash, headHeight) diff --git a/turbo/stages/headerdownload/header_algos.go 
b/turbo/stages/headerdownload/header_algos.go index 7b452eb6119..8cd2aa3a8fa 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -28,6 +28,7 @@ import ( "math/big" "slices" "sort" + "strconv" "strings" "time" @@ -270,7 +271,7 @@ func (hd *HeaderDownload) logAnchorState() { slices.Sort(bs) for j, b := range bs { if j == 0 { - sbb.WriteString(fmt.Sprintf("%d", b)) + sbb.WriteString(strconv.Itoa(b)) } else if j == len(bs)-1 { if bs[j-1]+1 == b { // Close interval diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go index 7baeeff2437..ccfebaa001d 100644 --- a/turbo/transactions/call.go +++ b/turbo/transactions/call.go @@ -18,6 +18,7 @@ package transactions import ( "context" + "errors" "fmt" "time" @@ -89,7 +90,7 @@ func DoCall( var overflow bool baseFee, overflow = uint256.FromBig(header.BaseFee) if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") + return nil, errors.New("header.BaseFee uint256 overflow") } } msg, err := args.ToMessage(gasCap, baseFee) @@ -221,7 +222,7 @@ func NewReusableCaller( var overflow bool baseFee, overflow = uint256.FromBig(header.BaseFee) if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") + return nil, errors.New("header.BaseFee uint256 overflow") } } diff --git a/turbo/trie/debug.go b/turbo/trie/debug.go index 4d115a97e5d..ecc340338c6 100644 --- a/turbo/trie/debug.go +++ b/turbo/trie/debug.go @@ -23,6 +23,7 @@ package trie import ( "bytes" + "encoding/hex" "fmt" "io" @@ -44,12 +45,12 @@ func (n *fullNode) fstring(ind string) string { resp := fmt.Sprintf("full\n%s ", ind) for i, node := range &n.Children { if node == nil { - resp += fmt.Sprintf("%s: ", indices[i]) + resp += indices[i] + ": " } else { - resp += fmt.Sprintf("%s: %v", indices[i], node.fstring(ind+" ")) + resp += indices[i] + ": " + node.fstring(ind+" ") } } - return resp + fmt.Sprintf("\n%s] ", ind) + return resp + "\n" + ind + "]" } func (n *fullNode) print(w 
io.Writer) { fmt.Fprintf(w, "f(") @@ -113,9 +114,9 @@ func (an accountNode) fstring(ind string) string { encodedAccount := make([]byte, an.EncodingLengthForHashing()) an.EncodeForHashing(encodedAccount) if an.storage == nil { - return fmt.Sprintf("%x", encodedAccount) + return hex.EncodeToString(encodedAccount) } - return fmt.Sprintf("%x %v", encodedAccount, an.storage.fstring(ind+" ")) + return hex.EncodeToString(encodedAccount) + " " + an.storage.fstring(ind+" ") } func (an accountNode) print(w io.Writer) { diff --git a/turbo/trie/hashbuilder.go b/turbo/trie/hashbuilder.go index 0d0a738e92a..194d4062182 100644 --- a/turbo/trie/hashbuilder.go +++ b/turbo/trie/hashbuilder.go @@ -18,6 +18,7 @@ package trie import ( "bytes" + "errors" "fmt" "io" "math/bits" @@ -488,7 +489,7 @@ func (hb *HashBuilder) extensionHash(key []byte) error { fmt.Printf("extensionHash [%x]=>[%x]\nHash [%x]\n", key, capture, hb.hashStack[len(hb.hashStack)-hashStackStride:len(hb.hashStack)]) } if _, ok := hb.nodeStack[len(hb.nodeStack)-1].(*fullNode); ok { - return fmt.Errorf("extensionHash cannot be emitted when a node is on top of the stack") + return errors.New("extensionHash cannot be emitted when a node is on top of the stack") } return nil } @@ -670,7 +671,7 @@ func (hb *HashBuilder) emptyRoot() { func (hb *HashBuilder) RootHash() (libcommon.Hash, error) { if !hb.hasRoot() { - return libcommon.Hash{}, fmt.Errorf("no root in the tree") + return libcommon.Hash{}, errors.New("no root in the tree") } return hb.rootHash(), nil } diff --git a/turbo/trie/proof.go b/turbo/trie/proof.go index 38c26ed6b6d..12fb58b667f 100644 --- a/turbo/trie/proof.go +++ b/turbo/trie/proof.go @@ -18,6 +18,7 @@ package trie import ( "bytes" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -127,7 +128,7 @@ func decodeRef(buf []byte) (node, []byte, error) { switch { case kind == rlp.List: if len(buf)-len(rest) >= length.Hash { - return nil, nil, fmt.Errorf("embedded nodes must be less than 
hash size") + return nil, nil, errors.New("embedded nodes must be less than hash size") } n, err := decodeNode(buf) if err != nil { @@ -192,7 +193,7 @@ func decodeShort(elems []byte) (*shortNode, error) { func decodeNode(encoded []byte) (node, error) { if len(encoded) == 0 { - return nil, fmt.Errorf("nodes must not be zero length") + return nil, errors.New("nodes must not be zero length") } elems, _, err := rlp.SplitList(encoded) if err != nil { @@ -240,7 +241,7 @@ func verifyProof(root libcommon.Hash, key []byte, proofs map[libcommon.Hash]node switch nt := node.(type) { case *fullNode: if len(key) == 0 { - return nil, fmt.Errorf("full nodes should not have values") + return nil, errors.New("full nodes should not have values") } node, key = nt.Children[key[0]], key[1:] if node == nil { @@ -307,13 +308,13 @@ func VerifyAccountProofByHash(stateRoot libcommon.Hash, accountKey libcommon.Has // A nil value proves the account does not exist. switch { case proof.Nonce != 0: - return fmt.Errorf("account is not in state, but has non-zero nonce") + return errors.New("account is not in state, but has non-zero nonce") case proof.Balance.ToInt().Sign() != 0: - return fmt.Errorf("account is not in state, but has balance") + return errors.New("account is not in state, but has balance") case proof.StorageHash != libcommon.Hash{}: - return fmt.Errorf("account is not in state, but has non-empty storage hash") + return errors.New("account is not in state, but has non-empty storage hash") case proof.CodeHash != libcommon.Hash{}: - return fmt.Errorf("account is not in state, but has non-empty code hash") + return errors.New("account is not in state, but has non-empty code hash") default: return nil } @@ -347,7 +348,7 @@ func VerifyStorageProof(storageRoot libcommon.Hash, proof accounts.StorProofResu func VerifyStorageProofByHash(storageRoot libcommon.Hash, keyHash libcommon.Hash, proof accounts.StorProofResult) error { if storageRoot == EmptyRoot || storageRoot == (libcommon.Hash{}) { 
if proof.Value.ToInt().Sign() != 0 { - return fmt.Errorf("empty storage root cannot have non-zero values") + return errors.New("empty storage root cannot have non-zero values") } // The spec here is a bit unclear. The yellow paper makes it clear that the // EmptyRoot hash is a special case where the trie is empty. Since the trie @@ -357,7 +358,7 @@ func VerifyStorageProofByHash(storageRoot libcommon.Hash, keyHash libcommon.Hash // pre-image of the EmptyRoot) should be included. This implementation // chooses to require the proof be empty. if len(proof.Proof) > 0 { - return fmt.Errorf("empty storage root should not have proof nodes") + return errors.New("empty storage root should not have proof nodes") } return nil } diff --git a/turbo/trie/retain_list.go b/turbo/trie/retain_list.go index 98087593a62..1bc58892915 100644 --- a/turbo/trie/retain_list.go +++ b/turbo/trie/retain_list.go @@ -22,6 +22,7 @@ package trie import ( "bytes" "encoding/binary" + "errors" "fmt" "math/big" "sort" @@ -151,7 +152,7 @@ func (pr *ProofRetainer) ProofResult() (*accounts.AccProofResult, error) { } if pr.acc.Initialised && result.StorageHash == (libcommon.Hash{}) { - return nil, fmt.Errorf("did not find storage root in proof elements") + return nil, errors.New("did not find storage root in proof elements") } result.StorageProof = make([]accounts.StorProofResult, len(pr.storageKeys)) diff --git a/turbo/trie/structural_test.go b/turbo/trie/structural_test.go index b50458bf234..4c1ff56c538 100644 --- a/turbo/trie/structural_test.go +++ b/turbo/trie/structural_test.go @@ -24,6 +24,7 @@ package trie import ( "bytes" "encoding/binary" + "errors" "fmt" "slices" "testing" @@ -550,7 +551,7 @@ func TestStorageOnly(t *testing.T) { require.Equal(t, fmt.Sprintf("%b", uint16(0b100000)), fmt.Sprintf("%b", hasTree)) require.NotNil(t, hashes) case 5: - require.NoError(t, fmt.Errorf("not expected")) + require.NoError(t, errors.New("not expected")) } return nil