@@ -3,7 +3,7 @@
 import shutil
 import tempfile
 import time
-from typing import Iterator, Optional
+from typing import Callable, Collection, Iterable, Iterator, Optional
 from unittest import main as ut_main

 from structlog import get_logger
@@ -16,13 +16,17 @@
 from hathor.daa import DifficultyAdjustmentAlgorithm, TestMode
 from hathor.event import EventManager
 from hathor.event.storage import EventStorage
+from hathor.manager import HathorManager
 from hathor.p2p.peer_id import PeerId
+from hathor.p2p.sync_v1.agent import NodeSyncTimestamp
+from hathor.p2p.sync_v2.agent import NodeBlockSync
 from hathor.p2p.sync_version import SyncVersion
 from hathor.pubsub import PubSubManager
 from hathor.reactor import ReactorProtocol as Reactor, get_global_reactor
 from hathor.simulator.clock import MemoryReactorHeapClock
-from hathor.transaction import BaseTransaction
+from hathor.transaction import BaseTransaction, Block, Transaction
 from hathor.transaction.storage.transaction_storage import TransactionStorage
+from hathor.types import VertexId
 from hathor.util import Random, not_none
 from hathor.wallet import BaseWallet, HDWallet, Wallet
 from tests.test_memory_reactor_clock import TestMemoryReactorClock
@@ -33,9 +37,8 @@
 USE_MEMORY_STORAGE = os.environ.get('HATHOR_TEST_MEMORY_STORAGE', 'false').lower() == 'true'


-def shorten_hash(container):
-    container_type = type(container)
-    return container_type(h[-2:].hex() for h in container)
+def short_hashes(container: Collection[bytes]) -> Iterable[str]:
+    return map(lambda hash_bytes: hash_bytes[-2:].hex(), container)


 def _load_peer_id_pool(file_path: Optional[str] = None) -> Iterator[PeerId]:
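
Note that `short_hashes` is not a drop-in replacement for `shorten_hash`: it returns a lazy `map` of hex strings instead of rebuilding the original container type. A quick illustration, using two made-up 32-byte hash values:

```python
# Illustration only: hypothetical hash values, not real vertex hashes.
hashes = {bytes(30) + b'\xab\xcd', bytes(30) + b'\x12\x34'}

# short_hashes() yields the hex of the last 2 bytes of each hash.
# It returns a one-shot map object; wrap it in list()/set() if you
# need to iterate more than once.
print(sorted(short_hashes(hashes)))  # ['1234', 'abcd']
```
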
@@ -50,7 +53,7 @@ def _load_peer_id_pool(file_path: Optional[str] = None) -> Iterator[PeerId]:
         yield PeerId.create_from_json(peer_id_dict)


-def _get_default_peer_id_pool_filepath():
+def _get_default_peer_id_pool_filepath() -> str:
     this_file_path = os.path.dirname(__file__)
     file_name = 'peer_id_pool.json'
     file_path = os.path.join(this_file_path, file_name)
@@ -109,19 +112,19 @@ class TestCase(unittest.TestCase):
     use_memory_storage: bool = USE_MEMORY_STORAGE
     seed_config: Optional[int] = None

-    def setUp(self):
-        self.tmpdirs = []
+    def setUp(self) -> None:
+        self.tmpdirs: list[str] = []
         self.clock = TestMemoryReactorClock()
         self.clock.advance(time.time())
         self.log = logger.new()
         self.reset_peer_id_pool()
         self.seed = secrets.randbits(64) if self.seed_config is None else self.seed_config
         self.log.info('set seed', seed=self.seed)
         self.rng = Random(self.seed)
-        self._pending_cleanups = []
+        self._pending_cleanups: list[Callable] = []
         self._settings = get_global_settings()

-    def tearDown(self):
+    def tearDown(self) -> None:
         self.clean_tmpdirs()
         for fn in self._pending_cleanups:
             fn()
@@ -144,12 +147,12 @@ def get_random_peer_id_from_pool(self, pool: Optional[list[PeerId]] = None,
         pool.remove(peer_id)
         return peer_id

-    def mkdtemp(self):
+    def mkdtemp(self) -> str:
         tmpdir = tempfile.mkdtemp()
         self.tmpdirs.append(tmpdir)
         return tmpdir

-    def _create_test_wallet(self, unlocked=False):
+    def _create_test_wallet(self, unlocked: bool = False) -> Wallet:
         """ Generate a Wallet with a number of keypairs for testing
         :rtype: Wallet
         """
@@ -169,14 +172,14 @@ def get_builder(self, network: str) -> TestBuilder:
             .set_network(network)
         return builder

-    def create_peer_from_builder(self, builder, start_manager=True):
+    def create_peer_from_builder(self, builder: Builder, start_manager: bool = True) -> HathorManager:
         artifacts = builder.build()
         manager = artifacts.manager

         if artifacts.rocksdb_storage:
             self._pending_cleanups.append(artifacts.rocksdb_storage.close)

-        manager.avg_time_between_blocks = 0.0001
+        # manager.avg_time_between_blocks = 0.0001  # FIXME: This property is not defined. Fix this.

         if start_manager:
             manager.start()
@@ -277,7 +280,7 @@ def create_peer( # type: ignore[no-untyped-def]

         return manager

-    def run_to_completion(self):
+    def run_to_completion(self) -> None:
         """ This will advance the test's clock until all calls scheduled are done.
         """
         for call in self.clock.getDelayedCalls():
@@ -300,7 +303,11 @@ def assertIsTopological(self, tx_sequence: Iterator[BaseTransaction], message: O
             self.assertIn(dep, valid_deps, message)
             valid_deps.add(tx.hash)

-    def _syncVersionFlags(self, enable_sync_v1=None, enable_sync_v2=None):
+    def _syncVersionFlags(
+        self,
+        enable_sync_v1: bool | None = None,
+        enable_sync_v2: bool | None = None
+    ) -> tuple[bool, bool]:
         """Internal: use this to check and get the flags and optionally provide override values."""
         if enable_sync_v1 is None:
             assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on '
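
For context, tests opt into a sync version via class-level flags; a sketch of how a subclass might set them, based on the `_enable_sync_v1`/`_enable_sync_v2` attributes checked above (the exact base-class conventions may differ):

```python
# Hypothetical sketch: a test class opting into sync-v2 only.
class MySyncV2Test(TestCase):
    _enable_sync_v1 = False
    _enable_sync_v2 = True

    def test_flags(self) -> None:
        # _syncVersionFlags() now returns a typed tuple[bool, bool].
        enable_v1, enable_v2 = self._syncVersionFlags()
        assert (enable_v1, enable_v2) == (False, True)
```
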
@@ -313,19 +320,19 @@ def _syncVersionFlags(self, enable_sync_v1=None, enable_sync_v2=None):
         assert enable_sync_v1 or enable_sync_v2, 'enable at least one sync version'
         return enable_sync_v1, enable_sync_v2

-    def assertTipsEqual(self, manager1, manager2):
+    def assertTipsEqual(self, manager1: HathorManager, manager2: HathorManager) -> None:
         _, enable_sync_v2 = self._syncVersionFlags()
         if enable_sync_v2:
             self.assertTipsEqualSyncV2(manager1, manager2)
         else:
             self.assertTipsEqualSyncV1(manager1, manager2)

-    def assertTipsNotEqual(self, manager1, manager2):
+    def assertTipsNotEqual(self, manager1: HathorManager, manager2: HathorManager) -> None:
         s1 = set(manager1.tx_storage.get_all_tips())
         s2 = set(manager2.tx_storage.get_all_tips())
         self.assertNotEqual(s1, s2)

-    def assertTipsEqualSyncV1(self, manager1, manager2):
+    def assertTipsEqualSyncV1(self, manager1: HathorManager, manager2: HathorManager) -> None:
         # XXX: this is the original implementation of assertTipsEqual
         s1 = set(manager1.tx_storage.get_all_tips())
         s2 = set(manager2.tx_storage.get_all_tips())
@@ -335,39 +342,45 @@ def assertTipsEqualSyncV1(self, manager1, manager2):
         s2 = set(manager2.tx_storage.get_tx_tips())
         self.assertEqual(s1, s2)

-    def assertTipsEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_indexes=True):
+    def assertTipsEqualSyncV2(
+        self,
+        manager1: HathorManager,
+        manager2: HathorManager,
+        *,
+        strict_sync_v2_indexes: bool = True
+    ) -> None:
         # tx tips
         if strict_sync_v2_indexes:
-            tips1 = manager1.tx_storage.indexes.mempool_tips.get()
-            tips2 = manager2.tx_storage.indexes.mempool_tips.get()
+            tips1 = not_none(not_none(manager1.tx_storage.indexes).mempool_tips).get()
+            tips2 = not_none(not_none(manager2.tx_storage.indexes).mempool_tips).get()
         else:
             tips1 = {tx.hash for tx in manager1.tx_storage.iter_mempool_tips_from_best_index()}
             tips2 = {tx.hash for tx in manager2.tx_storage.iter_mempool_tips_from_best_index()}
-        self.log.debug('tx tips1', len=len(tips1), list=shorten_hash(tips1))
-        self.log.debug('tx tips2', len=len(tips2), list=shorten_hash(tips2))
+        self.log.debug('tx tips1', len=len(tips1), list=short_hashes(tips1))
+        self.log.debug('tx tips2', len=len(tips2), list=short_hashes(tips2))
         self.assertEqual(tips1, tips2)

         # best block
         s1 = set(manager1.tx_storage.get_best_block_tips())
         s2 = set(manager2.tx_storage.get_best_block_tips())
-        self.log.debug('block tips1', len=len(s1), list=shorten_hash(s1))
-        self.log.debug('block tips2', len=len(s2), list=shorten_hash(s2))
+        self.log.debug('block tips1', len=len(s1), list=short_hashes(s1))
+        self.log.debug('block tips2', len=len(s2), list=short_hashes(s2))
         self.assertEqual(s1, s2)

         # best block (from height index)
-        b1 = manager1.tx_storage.indexes.height.get_tip()
-        b2 = manager2.tx_storage.indexes.height.get_tip()
+        b1 = not_none(manager1.tx_storage.indexes).height.get_tip()
+        b2 = not_none(manager2.tx_storage.indexes).height.get_tip()
         self.assertIn(b1, s2)
         self.assertIn(b2, s1)

-    def assertConsensusEqual(self, manager1, manager2):
+    def assertConsensusEqual(self, manager1: HathorManager, manager2: HathorManager) -> None:
         _, enable_sync_v2 = self._syncVersionFlags()
         if enable_sync_v2:
             self.assertConsensusEqualSyncV2(manager1, manager2)
         else:
             self.assertConsensusEqualSyncV1(manager1, manager2)

-    def assertConsensusEqualSyncV1(self, manager1, manager2):
+    def assertConsensusEqualSyncV1(self, manager1: HathorManager, manager2: HathorManager) -> None:
         self.assertEqual(manager1.tx_storage.get_vertices_count(), manager2.tx_storage.get_vertices_count())
         for tx1 in manager1.tx_storage.get_all_transactions():
             tx2 = manager2.tx_storage.get_transaction(tx1.hash)
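
The `not_none` wrapping above exists to satisfy mypy: `tx_storage.indexes` and `indexes.mempool_tips` are Optional, and `not_none` narrows them to their non-None types. A minimal sketch of what `hathor.util.not_none` presumably looks like (not copied from the source):

```python
from typing import Optional, TypeVar

T = TypeVar('T')

def not_none(value: Optional[T]) -> T:
    # Narrow Optional[T] to T, failing loudly if the value is missing.
    assert value is not None
    return value
```
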
@@ -381,12 +394,20 @@ def assertConsensusEqualSyncV1(self, manager1, manager2):
                 self.assertIsNone(tx2_meta.voided_by)
             else:
                 # If tx1 is voided, then tx2 must be voided.
+                assert tx1_meta.voided_by is not None
+                assert tx2_meta.voided_by is not None
                 self.assertGreaterEqual(len(tx1_meta.voided_by), 1)
                 self.assertGreaterEqual(len(tx2_meta.voided_by), 1)
                 # Hard verification
                 # self.assertEqual(tx1_meta.voided_by, tx2_meta.voided_by)

-    def assertConsensusEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_indexes=True):
+    def assertConsensusEqualSyncV2(
+        self,
+        manager1: HathorManager,
+        manager2: HathorManager,
+        *,
+        strict_sync_v2_indexes: bool = True
+    ) -> None:
         # The current sync algorithm does not propagate voided blocks/txs
         # so the count might be different even though the consensus is equal
         # One peer might have voided txs that the other does not have
@@ -397,7 +418,9 @@ def assertConsensusEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_index
         # the following is specific to sync-v2

         # helper function:
-        def get_all_executed_or_voided(tx_storage):
+        def get_all_executed_or_voided(
+            tx_storage: TransactionStorage
+        ) -> tuple[set[VertexId], set[VertexId], set[VertexId]]:
             """Get all txs separated into three sets: executed, voided, partial"""
             tx_executed = set()
             tx_voided = set()
@@ -424,14 +447,16 @@ def get_all_executed_or_voided(tx_storage):
         self.log.debug('node1 rest', len_voided=len(tx_voided1), len_partial=len(tx_partial1))
         self.log.debug('node2 rest', len_voided=len(tx_voided2), len_partial=len(tx_partial2))

-    def assertConsensusValid(self, manager):
+    def assertConsensusValid(self, manager: HathorManager) -> None:
         for tx in manager.tx_storage.get_all_transactions():
             if tx.is_block:
+                assert isinstance(tx, Block)
                 self.assertBlockConsensusValid(tx)
             else:
+                assert isinstance(tx, Transaction)
                 self.assertTransactionConsensusValid(tx)

-    def assertBlockConsensusValid(self, block):
+    def assertBlockConsensusValid(self, block: Block) -> None:
         self.assertTrue(block.is_block)
         if not block.parents:
             # Genesis
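
The added `assert isinstance(...)` lines are type-narrowing guards: `get_all_transactions()` yields `BaseTransaction`, and the asserts let mypy accept the `Block`/`Transaction`-typed calls that follow. The same pattern in isolation (helper names here are illustrative, not from the source):

```python
# Sketch of the narrowing pattern used above.
def handle(vertex: BaseTransaction) -> None:
    if vertex.is_block:
        assert isinstance(vertex, Block)  # mypy now treats vertex as Block
        process_block(vertex)             # hypothetical Block-typed helper
    else:
        assert isinstance(vertex, Transaction)
        process_transaction(vertex)       # hypothetical Transaction-typed helper
```
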
@@ -442,7 +467,8 @@ def assertBlockConsensusValid(self, block):
             parent_meta = parent.get_metadata()
             self.assertIsNone(parent_meta.voided_by)

-    def assertTransactionConsensusValid(self, tx):
+    def assertTransactionConsensusValid(self, tx: Transaction) -> None:
+        assert tx.storage is not None
         self.assertFalse(tx.is_block)
         meta = tx.get_metadata()
         if meta.voided_by and tx.hash in meta.voided_by:
@@ -462,38 +488,40 @@ def assertTransactionConsensusValid(self, tx):
             spent_meta = spent_tx.get_metadata()

             if spent_meta.voided_by is not None:
-                self.assertIsNotNone(meta.voided_by)
+                assert meta.voided_by is not None
                 self.assertTrue(spent_meta.voided_by)
                 self.assertTrue(meta.voided_by)
                 self.assertTrue(spent_meta.voided_by.issubset(meta.voided_by))

         for parent in tx.get_parents():
             parent_meta = parent.get_metadata()
             if parent_meta.voided_by is not None:
-                self.assertIsNotNone(meta.voided_by)
+                assert meta.voided_by is not None
                 self.assertTrue(parent_meta.voided_by)
                 self.assertTrue(meta.voided_by)
                 self.assertTrue(parent_meta.voided_by.issubset(meta.voided_by))

-    def assertSyncedProgress(self, node_sync):
+    def assertSyncedProgress(self, node_sync: NodeSyncTimestamp | NodeBlockSync) -> None:
         """Check "synced" status of p2p-manager, uses self._enable_sync_vX to choose which check to run."""
         enable_sync_v1, enable_sync_v2 = self._syncVersionFlags()
         if enable_sync_v2:
+            assert isinstance(node_sync, NodeBlockSync)
             self.assertV2SyncedProgress(node_sync)
         elif enable_sync_v1:
+            assert isinstance(node_sync, NodeSyncTimestamp)
             self.assertV1SyncedProgress(node_sync)

-    def assertV1SyncedProgress(self, node_sync):
+    def assertV1SyncedProgress(self, node_sync: NodeSyncTimestamp) -> None:
         self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)

-    def assertV2SyncedProgress(self, node_sync):
+    def assertV2SyncedProgress(self, node_sync: NodeBlockSync) -> None:
         self.assertEqual(node_sync.synced_block, node_sync.peer_best_block)

-    def clean_tmpdirs(self):
+    def clean_tmpdirs(self) -> None:
         for tmpdir in self.tmpdirs:
             shutil.rmtree(tmpdir)

-    def clean_pending(self, required_to_quiesce=True):
+    def clean_pending(self, required_to_quiesce: bool = True) -> None:
         """
         This handy method cleans all pending tasks from the reactor.
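
Taken together, a typical two-peer test built on these helpers might look like the following sketch (illustrative only; `create_peer` arguments and the peer-connection setup vary per test and are elided here):

```python
class TwoPeerExample(TestCase):
    _enable_sync_v1 = False
    _enable_sync_v2 = True

    def test_sync(self) -> None:
        manager1 = self.create_peer('testnet')
        manager2 = self.create_peer('testnet')
        # ... connect the peers and let the simulated clock run ...
        self.run_to_completion()
        self.assertConsensusValid(manager1)
        self.assertTipsEqual(manager1, manager2)
```
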