
Commit 0580c7f

refactor: replace fmt.Errorf with errors.New when not formatting is required (#1067)
1 parent 5cf1f18 commit 0580c7f

File tree

13 files changed (+51, -50 lines)
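In short, errors.New constructs an error from a fixed string, whereas fmt.Errorf is only needed when the message uses format verbs or wraps another error with %w; for static messages the two are equivalent, and errors.New skips the formatting machinery. A minimal sketch of the pattern applied across these files (the lookup function and its helper are illustrative placeholders, not code from this repository):

package main

import (
	"errors"
	"fmt"
)

// Static message: errors.New is enough, no formatting is performed.
var errClosed = errors.New("dht is closed")

// doLookup is a hypothetical helper used only to make the sketch compile.
func doLookup(key string) error { return errClosed }

func lookup(key string) error {
	if key == "" {
		// Fixed string, no verbs: errors.New (the pattern this commit applies).
		return errors.New("can't lookup empty key")
	}
	if err := doLookup(key); err != nil {
		// Dynamic message or wrapping: fmt.Errorf is still the right tool.
		return fmt.Errorf("lookup %q failed: %w", key, err)
	}
	return nil
}

func main() {
	fmt.Println(lookup(""))        // can't lookup empty key
	fmt.Println(lookup("somekey")) // lookup "somekey" failed: dht is closed
}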

dht_options.go

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@ package dht
 
 import (
     "context"
-    "fmt"
+    "errors"
     "testing"
     "time"
 
@@ -129,7 +129,7 @@ func NamespacedValidator(ns string, v record.Validator) Option {
     return func(c *dhtcfg.Config) error {
         nsval, ok := c.Validator.(record.NamespacedValidator)
         if !ok {
-            return fmt.Errorf("can only add namespaced validators to a NamespacedValidator")
+            return errors.New("can only add namespaced validators to a NamespacedValidator")
         }
         nsval[ns] = v
         return nil

fullrt/dht.go

Lines changed: 10 additions & 10 deletions
@@ -261,7 +261,7 @@ func (dht *FullRT) TriggerRefresh(ctx context.Context) error {
     case dht.triggerRefresh <- struct{}{}:
         return nil
     case <-dht.ctx.Done():
-        return fmt.Errorf("dht is closed")
+        return errors.New("dht is closed")
     }
 }
 
@@ -520,7 +520,7 @@ func (dht *FullRT) PutValue(ctx context.Context, key string, value []byte, opts
             return err
         }
         if i != 0 {
-            return fmt.Errorf("can't replace a newer value with an older value")
+            return errors.New("can't replace a newer value with an older value")
         }
     }
 
@@ -546,7 +546,7 @@ func (dht *FullRT) PutValue(ctx context.Context, key string, value []byte, opts
     }, peers, true)
 
     if successes == 0 {
-        return fmt.Errorf("failed to complete put")
+        return errors.New("failed to complete put")
     }
 
     return nil
@@ -834,7 +834,7 @@ func (dht *FullRT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err e
     if !dht.enableProviders {
         return routing.ErrNotSupported
     } else if !key.Defined() {
-        return fmt.Errorf("invalid cid: undefined")
+        return errors.New("invalid cid: undefined")
     }
     keyMH := key.Hash()
     logger.Debugw("providing", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH))
@@ -895,7 +895,7 @@ func (dht *FullRT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err e
     }
 
     if successes == 0 {
-        return fmt.Errorf("failed to complete provide")
+        return errors.New("failed to complete provide")
     }
 
     return ctx.Err()
@@ -989,7 +989,7 @@ func (dht *FullRT) ProvideMany(ctx context.Context, keys []multihash.Multihash)
     // TODO: We may want to limit the type of addresses in our provider records
     // For example, in a WAN-only DHT prohibit sharing non-WAN addresses (e.g. 192.168.0.100)
     if len(pi.Addrs) < 1 {
-        return fmt.Errorf("no known addresses for self, cannot put provider")
+        return errors.New("no known addresses for self, cannot put provider")
     }
 
     fn := func(ctx context.Context, p, k peer.ID) error {
@@ -1016,7 +1016,7 @@ func (dht *FullRT) PutMany(ctx context.Context, keys []string, values [][]byte)
     }
 
     if len(keys) != len(values) {
-        return fmt.Errorf("number of keys does not match the number of values")
+        return errors.New("number of keys does not match the number of values")
     }
 
     keysAsPeerIDs := make([]peer.ID, 0, len(keys))
@@ -1027,7 +1027,7 @@ func (dht *FullRT) PutMany(ctx context.Context, keys []string, values [][]byte)
     }
 
     if len(keys) != len(keyRecMap) {
-        return fmt.Errorf("does not support duplicate keys")
+        return errors.New("does not support duplicate keys")
     }
 
     fn := func(ctx context.Context, p, k peer.ID) error {
@@ -1200,7 +1200,7 @@ func (dht *FullRT) bulkMessageSend(ctx context.Context, keys []peer.ID, fn func(
 
     if numSendsSuccessful == 0 {
         logger.Infof("bulk send failed")
-        return fmt.Errorf("failed to complete bulk sending")
+        return errors.New("failed to complete bulk sending")
     }
 
     logger.Infof("bulk send complete: %d keys, %d unique, %d successful, %d skipped peers, %d fails",
@@ -1244,7 +1244,7 @@ func (dht *FullRT) FindProviders(ctx context.Context, c cid.Cid) ([]peer.AddrInf
     if !dht.enableProviders {
         return nil, routing.ErrNotSupported
     } else if !c.Defined() {
-        return nil, fmt.Errorf("invalid cid: undefined")
+        return nil, errors.New("invalid cid: undefined")
     }
 
     var providers []peer.AddrInfo

handlers.go

Lines changed: 3 additions & 4 deletions
@@ -4,7 +4,6 @@ import (
     "bytes"
     "context"
     "errors"
-    "fmt"
     "time"
 
     "github.com/libp2p/go-libp2p/core/peer"
@@ -256,7 +255,7 @@ func (dht *IpfsDHT) handleFindPeer(ctx context.Context, from peer.ID, pmes *pb.M
     var closest []peer.ID
 
     if len(pmes.GetKey()) == 0 {
-        return nil, fmt.Errorf("handleFindPeer with empty key")
+        return nil, errors.New("handleFindPeer with empty key")
     }
 
     // if looking for self... special case where we send it on CloserPeers.
@@ -304,9 +303,9 @@ func (dht *IpfsDHT) handleFindPeer(ctx context.Context, from peer.ID, pmes *pb.M
 func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
     key := pmes.GetKey()
     if len(key) > 80 {
-        return nil, fmt.Errorf("handleGetProviders key size too large")
+        return nil, errors.New("handleGetProviders key size too large")
     } else if len(key) == 0 {
-        return nil, fmt.Errorf("handleGetProviders key is empty")
+        return nil, errors.New("handleGetProviders key is empty")
     }
 
     resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

internal/config/config.go

Lines changed: 2 additions & 1 deletion
@@ -2,6 +2,7 @@ package config
 
 import (
     "context"
+    "errors"
     "fmt"
     "time"
 
@@ -101,7 +102,7 @@ func (c *Config) ApplyFallbacks(h host.Host) error {
                 nsval["ipns"] = ipns.Validator{KeyBook: h.Peerstore()}
             }
         } else {
-            return fmt.Errorf("the default Validator was changed without being marked as changed")
+            return errors.New("the default Validator was changed without being marked as changed")
         }
     }
     return nil

internal/logging.go

Lines changed: 3 additions & 2 deletions
@@ -1,6 +1,7 @@
 package internal
 
 import (
+    "errors"
     "fmt"
     "strings"
 
@@ -20,7 +21,7 @@ func multibaseB32Encode(k []byte) string {
 
 func tryFormatLoggableRecordKey(k string) (string, error) {
     if len(k) == 0 {
-        return "", fmt.Errorf("LoggableRecordKey is empty")
+        return "", errors.New("LoggableRecordKey is empty")
     }
     var proto, cstr string
     if k[0] == '/' {
@@ -73,7 +74,7 @@ func (lk LoggableProviderRecordBytes) String() string {
 
 func tryFormatLoggableProviderKey(k []byte) (string, error) {
     if len(k) == 0 {
-        return "", fmt.Errorf("LoggableProviderKey is empty")
+        return "", errors.New("LoggableProviderKey is empty")
     }
 
     encodedKey := multibaseB32Encode(k)

internal/net/message_manager.go

Lines changed: 3 additions & 3 deletions
@@ -3,7 +3,7 @@ package net
 import (
     "bufio"
     "context"
-    "fmt"
+    "errors"
     "io"
     "sync"
     "time"
@@ -26,7 +26,7 @@ import (
 var dhtReadMessageTimeout = 10 * time.Second
 
 // ErrReadTimeout is an error that occurs when no message is read within the timeout period.
-var ErrReadTimeout = fmt.Errorf("timed out reading response")
+var ErrReadTimeout = errors.New("timed out reading response")
 
 var logger = logging.Logger("dht")
 
@@ -197,7 +197,7 @@ func (ms *peerMessageSender) prepOrInvalidate(ctx context.Context) error {
 
 func (ms *peerMessageSender) prep(ctx context.Context) error {
     if ms.invalid {
-        return fmt.Errorf("message sender has been invalidated")
+        return errors.New("message sender has been invalidated")
     }
     if ms.s != nil {
         return nil
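One change above worth noting: ErrReadTimeout is an exported sentinel, and errors.New keeps it a stable, comparable value, so callers can continue to match it with errors.Is even when it is wrapped along the way. A small sketch of that check (the readResponse caller is hypothetical, not part of this diff):

package main

import (
	"errors"
	"fmt"
)

// Mirrors the sentinel defined in internal/net/message_manager.go.
var ErrReadTimeout = errors.New("timed out reading response")

// readResponse is a hypothetical caller that wraps the sentinel with context.
func readResponse() error {
	return fmt.Errorf("sending find-peer request: %w", ErrReadTimeout)
}

func main() {
	err := readResponse()
	if errors.Is(err, ErrReadTimeout) { // matches even through %w wrapping
		fmt.Println("read timed out, retrying:", err)
	}
}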

lookup.go

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@ package dht
 
 import (
     "context"
-    "fmt"
+    "errors"
     "time"
 
     "github.com/libp2p/go-libp2p-kad-dht/internal"
@@ -24,7 +24,7 @@ func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key string) ([]peer.ID,
     defer span.End()
 
     if key == "" {
-        return nil, fmt.Errorf("can't lookup empty key")
+        return nil, errors.New("can't lookup empty key")
     }
 
     // TODO: I can break the interface! return []peer.ID

lookup_optim.go

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@ package dht
 
 import (
     "context"
-    "fmt"
+    "errors"
     "math"
     "sync"
     "sync/atomic"
@@ -111,7 +111,7 @@ func (dht *IpfsDHT) optimisticProvide(outerCtx context.Context, keyMH multihash.
     key := string(keyMH)
 
     if key == "" {
-        return fmt.Errorf("can't lookup empty key")
+        return errors.New("can't lookup empty key")
     }
 
     // initialize new context for all putProvider operations.

netsize/netsize.go

Lines changed: 3 additions & 5 deletions
@@ -1,7 +1,7 @@
 package netsize
 
 import (
-    "fmt"
+    "errors"
     "math"
     "math/big"
     "sort"
@@ -20,8 +20,8 @@ import (
 const invalidEstimate int32 = -1
 
 var (
-    ErrNotEnoughData   = fmt.Errorf("not enough data")
-    ErrWrongNumOfPeers = fmt.Errorf("expected bucket size number of peers")
+    ErrNotEnoughData   = errors.New("not enough data")
+    ErrWrongNumOfPeers = errors.New("expected bucket size number of peers")
 )
 
 var (
@@ -144,7 +144,6 @@ func (e *Estimator) Track(key string, peers []peer.ID) error {
 
 // NetworkSize instructs the Estimator to calculate the current network size estimate.
 func (e *Estimator) NetworkSize() (int32, error) {
-
     // return cached calculation lock-free (fast path)
     if estimate := atomic.LoadInt32(&e.netSizeCache); estimate != invalidEstimate {
         logger.Debugw("Cached network size estimation", "estimate", estimate)
@@ -234,7 +233,6 @@
 // I actually thought this cannot happen as peers would have been added to the routing table before
 // the Track function gets called. But they seem sometimes not to be added.
 func (e *Estimator) calcWeight(key string, peers []peer.ID) float64 {
-
     cpl := kbucket.CommonPrefixLen(kbucket.ConvertKey(key), e.localID)
     bucketLevel := e.rt.NPeersForCpl(uint(cpl))
 

pb/protocol_messenger.go

Lines changed: 1 addition & 1 deletion
@@ -193,7 +193,7 @@ func (pm *ProtocolMessenger) PutProviderAddrs(ctx context.Context, p peer.ID, ke
     // TODO: We may want to limit the type of addresses in our provider records
     // For example, in a WAN-only DHT prohibit sharing non-WAN addresses (e.g. 192.168.0.100)
     if len(self.Addrs) < 1 {
-        return fmt.Errorf("no known addresses for self, cannot put provider")
+        return errors.New("no known addresses for self, cannot put provider")
     }
 
     pmes := NewMessage(Message_ADD_PROVIDER, key, 0)

providers/providers_manager.go

Lines changed: 9 additions & 6 deletions
@@ -3,6 +3,7 @@ package providers
 import (
     "context"
     "encoding/binary"
+    "errors"
     "fmt"
     "io"
     "strings"
@@ -36,11 +37,13 @@ const (
 
 // ProvideValidity is the default time that a Provider Record should last on DHT
 // This value is also known as Provider Record Expiration Interval.
-var ProvideValidity = amino.DefaultProvideValidity
-var defaultCleanupInterval = time.Hour
-var lruCacheSize = 256
-var batchBufferSize = 256
-var log = logging.Logger("providers")
+var (
+    ProvideValidity        = amino.DefaultProvideValidity
+    defaultCleanupInterval = time.Hour
+    lruCacheSize           = 256
+    batchBufferSize        = 256
+    log                    = logging.Logger("providers")
+)
 
 // ProviderStore represents a store that associates peers and their addresses to keys.
 type ProviderStore interface {
@@ -403,7 +406,7 @@ func loadProviderSet(ctx context.Context, dstore ds.Datastore, k []byte) (*provi
 func readTimeValue(data []byte) (time.Time, error) {
     nsec, n := binary.Varint(data)
     if n <= 0 {
-        return time.Time{}, fmt.Errorf("failed to parse time")
+        return time.Time{}, errors.New("failed to parse time")
     }
 
     return time.Unix(0, nsec), nil

query_test.go

Lines changed: 8 additions & 8 deletions
@@ -2,7 +2,7 @@ package dht
 
 import (
     "context"
-    "fmt"
+    "errors"
     "testing"
     "time"
 
@@ -34,7 +34,7 @@ func TestRTEvictionOnFailedQuery(t *testing.T) {
     // peers should be in the RT because of fixLowPeers
     require.NoError(t, tu.WaitFor(ctx, func() error {
         if !checkRoutingTable(d1, d2) {
-            return fmt.Errorf("should have routes")
+            return errors.New("should have routes")
         }
         return nil
     }))
@@ -45,7 +45,7 @@ func TestRTEvictionOnFailedQuery(t *testing.T) {
     // peers will still be in the RT because we have decoupled membership from connectivity
     require.NoError(t, tu.WaitFor(ctx, func() error {
         if !checkRoutingTable(d1, d2) {
-            return fmt.Errorf("should have routes")
+            return errors.New("should have routes")
         }
         return nil
     }))
@@ -59,7 +59,7 @@ func TestRTEvictionOnFailedQuery(t *testing.T) {
 
     require.NoError(t, tu.WaitFor(ctx, func() error {
         if checkRoutingTable(d1, d2) {
-            return fmt.Errorf("should not have routes")
+            return errors.New("should not have routes")
         }
         return nil
     }))
@@ -80,22 +80,22 @@ func TestRTAdditionOnSuccessfulQuery(t *testing.T) {
     // d1 has d2
     require.NoError(t, tu.WaitFor(ctx, func() error {
         if !checkRoutingTable(d1, d2) {
-            return fmt.Errorf("should have routes")
+            return errors.New("should have routes")
        }
         return nil
     }))
     // d2 has d3
     require.NoError(t, tu.WaitFor(ctx, func() error {
         if !checkRoutingTable(d2, d3) {
-            return fmt.Errorf("should have routes")
+            return errors.New("should have routes")
         }
         return nil
     }))
 
     // however, d1 does not know about d3
     require.NoError(t, tu.WaitFor(ctx, func() error {
         if checkRoutingTable(d1, d3) {
-            return fmt.Errorf("should not have routes")
+            return errors.New("should not have routes")
         }
         return nil
     }))
@@ -105,7 +105,7 @@ func TestRTAdditionOnSuccessfulQuery(t *testing.T) {
     require.NoError(t, err)
     require.NoError(t, tu.WaitFor(ctx, func() error {
         if !checkRoutingTable(d1, d3) {
-            return fmt.Errorf("should have routes")
+            return errors.New("should have routes")
         }
         return nil
     }))
