@@ -265,40 +265,21 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             }
         };
 
-        {
-            // A stream termination has been sent. This batch has ended. Process a completed batch.
-            // Remove the request from the peer's active batches
-            self.peers
-                .get_mut(peer_id)
-                .map(|active_requests| active_requests.remove(&batch_id));
-
-            match batch.download_completed(blocks) {
-                Ok(received) => {
-                    let awaiting_batches = batch_id
-                        .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target))
-                        / EPOCHS_PER_BATCH;
-                    debug!(self.log, "Batch downloaded"; "epoch" => batch_id, "blocks" => received, "batch_state" => self.visualize_batch_state(), "awaiting_batches" => awaiting_batches);
-
-                    // pre-emptively request more blocks from peers whilst we process current blocks,
-                    self.request_batches(network)?;
-                    self.process_completed_batches(network)
-                }
-                Err(result) => {
-                    let (expected_boundary, received_boundary, outcome) = result?;
-                    warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary,
-                        "peer_id" => %peer_id, batch);
-
-                    if let BatchOperationOutcome::Failed { blacklist } = outcome {
-                        return Err(RemoveChain::ChainFailed {
-                            blacklist,
-                            failing_batch: batch_id,
-                        });
-                    }
-                    // this batch can't be used, so we need to request it again.
-                    self.retry_batch_download(network, batch_id)
-                }
-            }
-        }
+        // A stream termination has been sent. This batch has ended. Process a completed batch.
+        // Remove the request from the peer's active batches
+        self.peers
+            .get_mut(peer_id)
+            .map(|active_requests| active_requests.remove(&batch_id));
+
+        let received = batch.download_completed(blocks)?;
+        let awaiting_batches = batch_id
+            .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target))
+            / EPOCHS_PER_BATCH;
+        debug!(self.log, "Batch downloaded"; "epoch" => batch_id, "blocks" => received, "batch_state" => self.visualize_batch_state(), "awaiting_batches" => awaiting_batches);
+
+        // pre-emptively request more blocks from peers whilst we process current blocks,
+        self.request_batches(network)?;
+        self.process_completed_batches(network)
     }
 
     /// Processes the batch with the given id.
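For context on the pattern the added lines rely on: the explicit `match` over `batch.download_completed(blocks)` collapses into a single `?` because the callee's failure can be propagated directly as the chain-level error the caller already returns. Below is a minimal standalone sketch of that refactor under simplified, assumed types; `Batch`, `RemoveChain`, and `on_block_response` here are illustrative stand-ins modeled on the names in the diff, not Lighthouse's real definitions.

// Standalone sketch (assumed, simplified types -- not Lighthouse's actual API):
// the callee reports failure in the caller's error type, so the caller can use `?`
// instead of matching on a nested Result.

#[derive(Debug)]
enum RemoveChain {
    ChainFailed { failing_batch: u64 },
}

struct Batch {
    id: u64,
}

impl Batch {
    // Returns the number of blocks received, or the caller-level error directly,
    // which is what makes `batch.download_completed(blocks)?` possible.
    fn download_completed(&mut self, blocks: Vec<u8>) -> Result<usize, RemoveChain> {
        if blocks.is_empty() {
            return Err(RemoveChain::ChainFailed { failing_batch: self.id });
        }
        Ok(blocks.len())
    }
}

fn on_block_response(batch: &mut Batch, blocks: Vec<u8>) -> Result<(), RemoveChain> {
    // The `?` replaces the old explicit match: errors propagate to the caller,
    // successes bind the received block count.
    let received = batch.download_completed(blocks)?;
    println!("Batch downloaded: {received} blocks");
    Ok(())
}

fn main() {
    let mut batch = Batch { id: 7 };
    assert!(on_block_response(&mut batch, vec![1, 2, 3]).is_ok());
    assert!(on_block_response(&mut batch, vec![]).is_err());
}

The design point is the same one the diff makes: once the callee's error type is the caller's (or converts via `From`), the nested `Ok`/`Err` handling and the surrounding block scope are no longer needed at the call site.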