@@ -5,15 +5,18 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"maps"
 	"math/rand"
 	"sync"
 	"sync/atomic"
 	"time"

 	"github.com/multiformats/go-base32"
 	ma "github.com/multiformats/go-multiaddr"
+	manet "github.com/multiformats/go-multiaddr/net"
 	"github.com/multiformats/go-multihash"

+	"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
 	"github.com/libp2p/go-libp2p-routing-helpers/tracing"
 	"github.com/libp2p/go-libp2p/core/event"
 	"github.com/libp2p/go-libp2p/core/host"
@@ -32,6 +35,7 @@ import (
 	"google.golang.org/protobuf/proto"

 	kaddht "github.com/libp2p/go-libp2p-kad-dht"
+	"github.com/libp2p/go-libp2p-kad-dht/amino"
 	"github.com/libp2p/go-libp2p-kad-dht/crawler"
 	"github.com/libp2p/go-libp2p-kad-dht/internal"
 	internalConfig "github.com/libp2p/go-libp2p-kad-dht/internal/config"
@@ -113,6 +117,8 @@ type FullRT struct {
 	self peer.ID

 	peerConnectednessSubscriber event.Subscription
+
+	ipDiversityFilterLimit int
 }

 // NewFullRT creates a DHT client that tracks the full network. It takes a protocol prefix for the given network,
@@ -127,10 +133,11 @@ type FullRT struct {
 // bootstrap peers).
 func NewFullRT(h host.Host, protocolPrefix protocol.ID, options ...Option) (*FullRT, error) {
 	fullrtcfg := config{
-		crawlInterval:       time.Hour,
-		bulkSendParallelism: 20,
-		waitFrac:            0.3,
-		timeoutPerOp:        5 * time.Second,
+		crawlInterval:          time.Hour,
+		bulkSendParallelism:    20,
+		waitFrac:               0.3,
+		timeoutPerOp:           5 * time.Second,
+		ipDiversityFilterLimit: amino.DefaultMaxPeersPerIPGroup,
 	}
 	if err := fullrtcfg.apply(options...); err != nil {
 		return nil, err
@@ -156,7 +163,7 @@ func NewFullRT(h host.Host, protocolPrefix protocol.ID, options ...Option) (*Ful
 		return nil, err
 	}

-	ms := net.NewMessageSenderImpl(h, []protocol.ID{dhtcfg.ProtocolPrefix + "/kad/1.0.0"})
+	ms := net.NewMessageSenderImpl(h, amino.Protocols)
 	protoMessenger, err := dht_pb.NewProtocolMessenger(ms)
 	if err != nil {
 		return nil, err
@@ -266,14 +273,9 @@ func (dht *FullRT) TriggerRefresh(ctx context.Context) error {
 }

 func (dht *FullRT) Stat() map[string]peer.ID {
-	newMap := make(map[string]peer.ID)
-
 	dht.kMapLk.RLock()
-	for k, v := range dht.keyToPeerMap {
-		newMap[k] = v
-	}
-	dht.kMapLk.RUnlock()
-	return newMap
+	defer dht.kMapLk.RUnlock()
+	return maps.Clone(dht.keyToPeerMap)
 }

 // Ready indicates that the routing table has been refreshed recently. It is recommended to be used for operations where
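
The rewritten Stat relies on the standard library maps package (Go 1.21+): maps.Clone returns a shallow copy, and the deferred RUnlock keeps the read lock held while the copy is taken, so the returned map can be read without racing concurrent writers. A minimal standalone sketch of the same pattern, with illustrative names not taken from the patch:

package main

import (
	"fmt"
	"maps"
	"sync"
)

// snapshot returns a shallow copy of m taken under a read lock, so callers
// can iterate the result without holding the lock or racing writers.
func snapshot(mu *sync.RWMutex, m map[string]int) map[string]int {
	mu.RLock()
	defer mu.RUnlock()
	return maps.Clone(m)
}

func main() {
	var mu sync.RWMutex
	m := map[string]int{"a": 1, "b": 2}
	fmt.Println(snapshot(&mu, m)) // map[a:1 b:2]
}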
@@ -449,7 +451,7 @@ func (dht *FullRT) CheckPeers(ctx context.Context, peers ...peer.ID) (int, int)
 func workers(numWorkers int, fn func(peer.AddrInfo), inputs <-chan peer.AddrInfo) {
 	jobs := make(chan peer.AddrInfo)
 	defer close(jobs)
-	for i := 0; i < numWorkers; i++ {
+	for range numWorkers {
 		go func() {
 			for j := range jobs {
 				fn(j)
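
The counter loops replaced here and in bulkMessageSend further down use Go 1.22's range-over-integer form, which iterates exactly n times when the index is unused. A tiny illustration, not part of the patch:

package main

import "fmt"

func main() {
	count := 0
	// Equivalent to: for i := 0; i < 3; i++ { count++ }, with the index unused.
	for range 3 {
		count++
	}
	fmt.Println(count) // 3
}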
@@ -461,30 +463,78 @@ func workers(numWorkers int, fn func(peer.AddrInfo), inputs <-chan peer.AddrInfo
 	}
 }

+// GetClosestPeers tries to return the `dht.bucketSize` closest known peers to
+// the given key.
+//
+// If the IP diversity filter limit is set, the returned peers will contain at
+// most `dht.ipDiversityFilterLimit` peers sharing the same IP group. Hence,
+// the peers may not be the absolute closest peers to the given key, but they
+// will be more diverse in terms of IP addresses.
 func (dht *FullRT) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) {
 	_, span := internal.StartSpan(ctx, "FullRT.GetClosestPeers", trace.WithAttributes(internal.KeyAsAttribute("Key", key)))
 	defer span.End()

 	kbID := kb.ConvertKey(key)
 	kadKey := kadkey.KbucketIDToKey(kbID)
-	dht.rtLk.RLock()
-	closestKeys := kademlia.ClosestN(kadKey, dht.rt, dht.bucketSize)
-	dht.rtLk.RUnlock()

-	peers := make([]peer.ID, 0, len(closestKeys))
-	for _, k := range closestKeys {
-		dht.kMapLk.RLock()
-		p, ok := dht.keyToPeerMap[string(k)]
-		if !ok {
-			logger.Errorf("key not found in map")
-		}
-		dht.kMapLk.RUnlock()
-		dht.peerAddrsLk.RLock()
-		peerAddrs := dht.peerAddrs[p]
-		dht.peerAddrsLk.RUnlock()
+	ipGroupCounts := make(map[peerdiversity.PeerIPGroupKey]map[peer.ID]struct{})
+	peers := make([]peer.ID, 0, dht.bucketSize)
+
+	// If ipDiversityFilterLimit is non-zero, the step is slightly larger than
+	// the bucket size, allowing to have a few backup peers in case some are
+	// filtered out by the diversity filter. Multiple calls to ClosestN are
+	// expensive, but increasing the `count` parameter is cheap.
+	step := dht.bucketSize + 2*dht.ipDiversityFilterLimit
+	for nClosest := 0; nClosest < dht.rt.Size(); nClosest += step {
+		dht.rtLk.RLock()
+		// Get the last `step` closest peers, because we already tried the `nClosest` closest peers
+		closestKeys := kademlia.ClosestN(kadKey, dht.rt, nClosest+step)[nClosest:]
+		dht.rtLk.RUnlock()
+
+	PeersLoop:
+		for _, k := range closestKeys {
+			dht.kMapLk.RLock()
+			// Recover the peer ID from the key
+			p, ok := dht.keyToPeerMap[string(k)]
+			if !ok {
+				logger.Errorf("key not found in map")
+				continue
+			}
+			dht.kMapLk.RUnlock()
+			dht.peerAddrsLk.RLock()
+			peerAddrs := dht.peerAddrs[p]
+			dht.peerAddrsLk.RUnlock()
+
+			if dht.ipDiversityFilterLimit > 0 {
+				for _, addr := range peerAddrs {
+					ip, err := manet.ToIP(addr)
+					if err != nil {
+						continue
+					}
+					ipGroup := peerdiversity.IPGroupKey(ip)
+					if len(ipGroup) == 0 {
+						continue
+					}
+					if _, ok := ipGroupCounts[ipGroup]; !ok {
+						ipGroupCounts[ipGroup] = make(map[peer.ID]struct{})
+					}
+					if len(ipGroupCounts[ipGroup]) >= dht.ipDiversityFilterLimit {
+						// This ip group is already overrepresented, skip this peer
+						continue PeersLoop
+					}
+					ipGroupCounts[ipGroup][p] = struct{}{}
+				}
+			}
+
+			// Add the peer's known addresses to the peerstore so that it can be
+			// dialed by the caller.
+			dht.h.Peerstore().AddAddrs(p, peerAddrs, peerstore.TempAddrTTL)
+			peers = append(peers, p)

-		dht.h.Peerstore().AddAddrs(p, peerAddrs, peerstore.TempAddrTTL)
-		peers = append(peers, p)
+			if len(peers) == dht.bucketSize {
+				return peers, nil
+			}
+		}
 	}
 	return peers, nil
 }
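
The new doc comment and the step-sizing comment describe the behaviour of the diversity filter; the sketch below isolates the per-group cap in a runnable, simplified form. The names (capPerGroup, groupOf, byPrefix) are hypothetical and not from the patch, and groupOf stands in for deriving an IP group key from a peer's addresses (peerdiversity.IPGroupKey above). Because peers over the cap are skipped, GetClosestPeers requests candidates in steps of bucketSize + 2*ipDiversityFilterLimit so that filtered-out peers have replacements.

package main

import "fmt"

// capPerGroup admits peers in order, skipping any peer whose group already
// holds `limit` admitted members.
func capPerGroup(peers []string, groupOf func(string) string, limit int) []string {
	counts := make(map[string]map[string]struct{})
	out := make([]string, 0, len(peers))
	for _, p := range peers {
		g := groupOf(p)
		if counts[g] == nil {
			counts[g] = make(map[string]struct{})
		}
		if len(counts[g]) >= limit {
			continue // group already overrepresented, skip this peer
		}
		counts[g][p] = struct{}{}
		out = append(out, p)
	}
	return out
}

func main() {
	byPrefix := func(p string) string { return p[:1] } // toy grouping: first byte
	fmt.Println(capPerGroup([]string{"a1", "a2", "a3", "b1"}, byPrefix, 2))
	// Output: [a1 a2 b1]
}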
@@ -615,7 +665,7 @@ func (dht *FullRT) SearchValue(ctx context.Context, key string, opts ...routing.
 	}

 	stopCh := make(chan struct{})
-	valCh, lookupRes := dht.getValues(ctx, key, stopCh)
+	valCh, lookupRes := dht.getValues(ctx, key)

 	out := make(chan []byte)
 	go func() {
@@ -743,7 +793,7 @@ type lookupWithFollowupResult struct {
 	peers []peer.ID // the top K not unreachable peers at the end of the query
 }

-func (dht *FullRT) getValues(ctx context.Context, key string, stopQuery chan struct{}) (<-chan RecvdVal, <-chan *lookupWithFollowupResult) {
+func (dht *FullRT) getValues(ctx context.Context, key string) (<-chan RecvdVal, <-chan *lookupWithFollowupResult) {
 	valCh := make(chan RecvdVal, 1)
 	lookupResCh := make(chan *lookupWithFollowupResult, 1)

@@ -1004,7 +1054,7 @@ func (dht *FullRT) ProvideMany(ctx context.Context, keys []multihash.Multihash)
 		keysAsPeerIDs = append(keysAsPeerIDs, peer.ID(k))
 	}

-	return dht.bulkMessageSend(ctx, keysAsPeerIDs, fn, true)
+	return dht.bulkMessageSend(ctx, keysAsPeerIDs, fn)
 }

 func (dht *FullRT) PutMany(ctx context.Context, keys []string, values [][]byte) error {
@@ -1035,10 +1085,10 @@ func (dht *FullRT) PutMany(ctx context.Context, keys []string, values [][]byte)
 		return dht.protoMessenger.PutValue(ctx, p, record.MakePutRecord(keyStr, keyRecMap[keyStr]))
 	}

-	return dht.bulkMessageSend(ctx, keysAsPeerIDs, fn, false)
+	return dht.bulkMessageSend(ctx, keysAsPeerIDs, fn)
 }

-func (dht *FullRT) bulkMessageSend(ctx context.Context, keys []peer.ID, fn func(ctx context.Context, target, k peer.ID) error, isProvRec bool) error {
+func (dht *FullRT) bulkMessageSend(ctx context.Context, keys []peer.ID, fn func(ctx context.Context, target, k peer.ID) error) error {
 	ctx, span := internal.StartSpan(ctx, "FullRT.BulkMessageSend")
 	defer span.End()

@@ -1089,7 +1139,7 @@ func (dht *FullRT) bulkMessageSend(ctx context.Context, keys []peer.ID, fn func(
 	workCh := make(chan workMessage, 1)
 	wg := sync.WaitGroup{}
 	wg.Add(dht.bulkSendParallelism)
-	for i := 0; i < dht.bulkSendParallelism; i++ {
+	for range dht.bulkSendParallelism {
 		go func() {
 			defer wg.Done()
 			defer logger.Debugf("bulk send goroutine done")