@@ -163,6 +163,20 @@ pub async fn fetch_and_process_engine_blobs<T: BeaconChainTypes>(
         return Ok(None);
     }
 
+    if chain
+        .canonical_head
+        .fork_choice_read_lock()
+        .contains_block(&block_root)
+    {
+        // Avoid computing columns if block has already been imported.
+        debug!(
+            log,
+            "Ignoring EL blobs response";
+            "info" => "block has already been imported",
+        );
+        return Ok(None);
+    }
+
     let data_columns_receiver = spawn_compute_and_publish_data_columns_task(
         &chain,
         block.clone(),
@@ -248,18 +262,21 @@ fn spawn_compute_and_publish_data_columns_task<T: BeaconChainTypes>(
             }
         };
 
-        if let Err(e) = data_columns_sender.send(all_data_columns.clone()) {
-            error!(log, "Failed to send computed data columns"; "error" => ?e);
+        if data_columns_sender.send(all_data_columns.clone()).is_err() {
+            // The data column receiver has been dropped - the block may have already been
+            // imported. This race condition exists because gossip columns may arrive and
+            // trigger block import during the computation. Here we just drop the computed columns.
+            debug!(
+                log,
+                "Failed to send computed data columns";
+            );
+            return;
         };
 
-        // Check indices from cache before sending the columns, to make sure we don't
-        // publish components already seen on gossip.
-        let is_supernode = chain_cloned.data_availability_checker.is_supernode();
-
         // At the moment non supernodes are not required to publish any columns.
         // TODO(das): we could experiment with having full nodes publish their custodied
        // columns here.
-        if !is_supernode {
+        if !chain_cloned.data_availability_checker.is_supernode() {
             return;
         }
 