9
9
"github.com/libp2p/go-libp2p/core/network"
10
10
"github.com/libp2p/go-libp2p/core/peer"
11
11
"github.com/libp2p/go-libp2p/core/protocol"
12
+ ma "github.com/multiformats/go-multiaddr"
12
13
13
14
logging "github.com/ipfs/go-log/v2"
14
15
"github.com/libp2p/go-msgio/pbio"
@@ -135,11 +136,61 @@ type HandleQueryResult func(p peer.ID, rtPeers []*peer.AddrInfo)
135
136
// HandleQueryFail is a callback invoked when a query to peer p fails;
// err describes why the query could not be completed.
type HandleQueryFail func(p peer.ID, err error)
137
138
139
// peerAddrs is a concurrency-safe record of known addresses per peer,
// used by the crawler instead of the host peerstore so entries can be
// dropped as soon as a peer has been queried. Addresses are deduplicated
// by their binary representation (the inner map key is addr.Bytes()).
type peerAddrs struct {
	peers map[peer.ID]map[string]ma.Multiaddr
	// lk guards peers; it is a pointer so peerAddrs can be passed by value
	// while all copies share the same lock.
	lk *sync.RWMutex
}
143
+
144
+ func newPeerAddrs () peerAddrs {
145
+ return peerAddrs {
146
+ peers : make (map [peer.ID ]map [string ]ma.Multiaddr ),
147
+ lk : new (sync.RWMutex ),
148
+ }
149
+ }
150
+
151
+ func (ps peerAddrs ) RemoveSourceAndAddPeers (source peer.ID , peers map [peer.ID ][]ma.Multiaddr ) {
152
+ ps .lk .Lock ()
153
+ defer ps .lk .Unlock ()
154
+
155
+ // remove source from peerstore
156
+ delete (ps .peers , source )
157
+ // add peers to peerstore
158
+ ps .addAddrsNoLock (peers )
159
+ }
160
+
161
+ func (ps peerAddrs ) addAddrsNoLock (peers map [peer.ID ][]ma.Multiaddr ) {
162
+ for p , addrs := range peers {
163
+ ps .addPeerAddrsNoLock (p , addrs )
164
+ }
165
+ }
166
+
167
+ func (ps peerAddrs ) addPeerAddrsNoLock (p peer.ID , addrs []ma.Multiaddr ) {
168
+ if _ , ok := ps .peers [p ]; ! ok {
169
+ ps .peers [p ] = make (map [string ]ma.Multiaddr )
170
+ }
171
+ for _ , addr := range addrs {
172
+ ps .peers [p ][string (addr .Bytes ())] = addr
173
+ }
174
+ }
175
+
176
+ func (ps peerAddrs ) PeerInfo (p peer.ID ) peer.AddrInfo {
177
+ ps .lk .RLock ()
178
+ defer ps .lk .RUnlock ()
179
+
180
+ addrs := make ([]ma.Multiaddr , 0 , len (ps .peers [p ]))
181
+ for _ , addr := range ps .peers [p ] {
182
+ addrs = append (addrs , addr )
183
+ }
184
+ return peer.AddrInfo {ID : p , Addrs : addrs }
185
+ }
186
+
138
187
// Run crawls dht peers from an initial seed of `startingPeers`
139
188
func (c * DefaultCrawler ) Run (ctx context.Context , startingPeers []* peer.AddrInfo , handleSuccess HandleQueryResult , handleFail HandleQueryFail ) {
140
189
jobs := make (chan peer.ID , 1 )
141
190
results := make (chan * queryResult , 1 )
142
191
192
+ peerAddrs := newPeerAddrs ()
193
+
143
194
// Start worker goroutines
144
195
var wg sync.WaitGroup
145
196
wg .Add (c .parallelism )
@@ -148,7 +199,8 @@ func (c *DefaultCrawler) Run(ctx context.Context, startingPeers []*peer.AddrInfo
148
199
defer wg .Done ()
149
200
for p := range jobs {
150
201
qctx , cancel := context .WithTimeout (ctx , c .queryTimeout )
151
- res := c .queryPeer (qctx , p )
202
+ ai := peerAddrs .PeerInfo (p )
203
+ res := c .queryPeer (qctx , ai )
152
204
cancel () // do not defer, cleanup after each job
153
205
results <- res
154
206
}
@@ -162,26 +214,28 @@ func (c *DefaultCrawler) Run(ctx context.Context, startingPeers []*peer.AddrInfo
162
214
peersSeen := make (map [peer.ID ]struct {})
163
215
164
216
numSkipped := 0
217
+ peerAddrs .lk .Lock ()
165
218
for _ , ai := range startingPeers {
166
219
extendAddrs := c .host .Peerstore ().Addrs (ai .ID )
167
220
if len (ai .Addrs ) > 0 {
168
221
extendAddrs = append (extendAddrs , ai .Addrs ... )
169
- c .host .Peerstore ().AddAddrs (ai .ID , extendAddrs , c .dialAddressExtendDur )
170
222
}
171
223
if len (extendAddrs ) == 0 {
172
224
numSkipped ++
173
225
continue
174
226
}
227
+ peerAddrs .addPeerAddrsNoLock (ai .ID , extendAddrs )
175
228
176
229
toDial = append (toDial , ai )
177
230
peersSeen [ai .ID ] = struct {}{}
178
231
}
232
+ peerAddrs .lk .Unlock ()
179
233
180
234
if numSkipped > 0 {
181
235
logger .Infof ("%d starting peers were skipped due to lack of addresses. Starting crawl with %d peers" , numSkipped , len (toDial ))
182
236
}
183
237
184
- numQueried := 0
238
+ peersQueried := make ( map [peer. ID ] struct {})
185
239
outstanding := 0
186
240
187
241
for len (toDial ) > 0 || outstanding > 0 {
@@ -197,14 +251,20 @@ func (c *DefaultCrawler) Run(ctx context.Context, startingPeers []*peer.AddrInfo
197
251
if len (res .data ) > 0 {
198
252
logger .Debugf ("peer %v had %d peers" , res .peer , len (res .data ))
199
253
rtPeers := make ([]* peer.AddrInfo , 0 , len (res .data ))
254
+ addrsToUpdate := make (map [peer.ID ][]ma.Multiaddr )
200
255
for p , ai := range res .data {
201
- c .host .Peerstore ().AddAddrs (p , ai .Addrs , c .dialAddressExtendDur )
256
+ if _ , ok := peersQueried [p ]; ! ok {
257
+ addrsToUpdate [p ] = ai .Addrs
258
+ }
202
259
if _ , ok := peersSeen [p ]; ! ok {
203
260
peersSeen [p ] = struct {}{}
204
261
toDial = append (toDial , ai )
205
262
}
206
263
rtPeers = append (rtPeers , ai )
207
264
}
265
+ peersQueried [res .peer ] = struct {}{}
266
+ peerAddrs .RemoveSourceAndAddPeers (res .peer , addrsToUpdate )
267
+
208
268
if handleSuccess != nil {
209
269
handleSuccess (res .peer , rtPeers )
210
270
}
@@ -214,9 +274,8 @@ func (c *DefaultCrawler) Run(ctx context.Context, startingPeers []*peer.AddrInfo
214
274
outstanding --
215
275
case jobCh <- nextPeerID :
216
276
outstanding ++
217
- numQueried ++
218
277
toDial = toDial [1 :]
219
- logger .Debugf ("starting %d out of %d" , numQueried , len (peersSeen ))
278
+ logger .Debugf ("starting %d out of %d" , len ( peersQueried ) + 1 , len (peersSeen ))
220
279
}
221
280
}
222
281
}
@@ -227,20 +286,25 @@ type queryResult struct {
227
286
err error
228
287
}
229
288
230
- func (c * DefaultCrawler ) queryPeer (ctx context.Context , nextPeer peer.ID ) * queryResult {
231
- tmpRT , err := kbucket .NewRoutingTable (20 , kbucket .ConvertPeerID (nextPeer ), time .Hour , c .host .Peerstore (), time .Hour , nil )
289
+ func (c * DefaultCrawler ) queryPeer (ctx context.Context , nextPeer peer.AddrInfo ) * queryResult {
290
+ tmpRT , err := kbucket .NewRoutingTable (20 , kbucket .ConvertPeerID (nextPeer . ID ), time .Hour , c .host .Peerstore (), time .Hour , nil )
232
291
if err != nil {
233
- logger .Errorf ("error creating rt for peer %v : %v" , nextPeer , err )
234
- return & queryResult {nextPeer , nil , err }
292
+ logger .Errorf ("error creating rt for peer %v : %v" , nextPeer . ID , err )
293
+ return & queryResult {nextPeer . ID , nil , err }
235
294
}
236
295
237
296
connCtx , cancel := context .WithTimeout (ctx , c .connectTimeout )
238
297
defer cancel ()
239
- err = c .host .Connect (connCtx , peer. AddrInfo { ID : nextPeer } )
298
+ err = c .host .Connect (connCtx , nextPeer )
240
299
if err != nil {
241
- logger .Debugf ("could not connect to peer %v: %v" , nextPeer , err )
242
- return & queryResult {nextPeer , nil , err }
300
+ logger .Debugf ("could not connect to peer %v: %v" , nextPeer . ID , err )
301
+ return & queryResult {nextPeer . ID , nil , err }
243
302
}
303
+ // Extend peerstore address ttl for addresses whose ttl is below
304
+ // c.dialAddressExtendDur. By now identify has already cleaned up addresses
305
+ // provided to Connect above and only kept the listen addresses advertised by
306
+ // the remote peer
307
+ c .host .Peerstore ().AddAddrs (nextPeer .ID , c .host .Peerstore ().Addrs (nextPeer .ID ), c .dialAddressExtendDur )
244
308
245
309
localPeers := make (map [peer.ID ]* peer.AddrInfo )
246
310
var retErr error
@@ -249,9 +313,9 @@ func (c *DefaultCrawler) queryPeer(ctx context.Context, nextPeer peer.ID) *query
249
313
if err != nil {
250
314
panic (err )
251
315
}
252
- peers , err := c .dhtRPC .GetClosestPeers (ctx , nextPeer , generatePeer )
316
+ peers , err := c .dhtRPC .GetClosestPeers (ctx , nextPeer . ID , generatePeer )
253
317
if err != nil {
254
- logger .Debugf ("error finding data on peer %v with cpl %d : %v" , nextPeer , cpl , err )
318
+ logger .Debugf ("error finding data on peer %v with cpl %d : %v" , nextPeer . ID , cpl , err )
255
319
retErr = err
256
320
break
257
321
}
@@ -263,8 +327,8 @@ func (c *DefaultCrawler) queryPeer(ctx context.Context, nextPeer peer.ID) *query
263
327
}
264
328
265
329
if retErr != nil {
266
- return & queryResult {nextPeer , nil , retErr }
330
+ return & queryResult {nextPeer . ID , nil , retErr }
267
331
}
268
332
269
- return & queryResult {nextPeer , localPeers , retErr }
333
+ return & queryResult {nextPeer . ID , localPeers , retErr }
270
334
}
0 commit comments