from hathor import daa
from hathor.checkpoint import Checkpoint
- from hathor.conf import HathorSettings
+ from hathor.conf.settings import HathorSettings
from hathor.consensus import ConsensusAlgorithm
from hathor.event.event_manager import EventManager
from hathor.exception import (
...
from hathor.verification.verification_service import VerificationService
from hathor.wallet import BaseWallet

- settings = HathorSettings()
logger = get_logger()
cpu = get_cpu_profiler()


- DEFAULT_CAPABILITIES = [
-     settings.CAPABILITY_WHITELIST,
-     settings.CAPABILITY_SYNC_VERSION,
-     settings.CAPABILITY_GET_BEST_BLOCKCHAIN
- ]
-
-
class HathorManager:
    """ HathorManager manages the node with the help of other specialized classes.
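This first hunk replaces the import-time `HathorSettings()` singleton (and the `DEFAULT_CAPABILITIES` constant derived from it) with a settings object injected through the constructor, as the later hunks show. A minimal sketch of the resulting pattern, using a hypothetical `FakeSettings` stand-in rather than the real `HathorSettings` (field names match the diff; the string values are illustrative):

from dataclasses import dataclass

@dataclass(frozen=True)
class FakeSettings:
    # Illustrative stand-in for HathorSettings.
    CAPABILITY_WHITELIST: str = 'whitelist'
    CAPABILITY_SYNC_VERSION: str = 'sync-version'
    CAPABILITY_GET_BEST_BLOCKCHAIN: str = 'get-best-blockchain'

class Manager:
    def __init__(self, *, settings: FakeSettings) -> None:
        self.settings = settings  # injected, not a module-level global

    def get_default_capabilities(self) -> list[str]:
        # Evaluated per instance, after injection, instead of at import time.
        return [
            self.settings.CAPABILITY_WHITELIST,
            self.settings.CAPABILITY_SYNC_VERSION,
            self.settings.CAPABILITY_GET_BEST_BLOCKCHAIN,
        ]

# Two managers with different settings can now coexist in one process,
# which the old import-time singleton could not support.
mainnet = Manager(settings=FakeSettings())
custom = Manager(settings=FakeSettings(CAPABILITY_WHITELIST='custom-whitelist'))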
@@ -95,6 +87,7 @@ class UnhealthinessReason(str, Enum):
    def __init__(self,
                 reactor: Reactor,
                 *,
+                settings: HathorSettings,
                 pubsub: PubSubManager,
                 consensus_algorithm: ConsensusAlgorithm,
                 peer_id: PeerId,
@@ -130,6 +123,7 @@ def __init__(self,
                'Either enable it, or use the reset-event-queue CLI command to remove all event-related data'
            )

+        self.settings = settings
        self._cmd_path: Optional[str] = None

        self.log = logger.new()
@@ -223,7 +217,7 @@ def __init__(self,
        if capabilities is not None:
            self.capabilities = capabilities
        else:
-            self.capabilities = DEFAULT_CAPABILITIES
+            self.capabilities = self.get_default_capabilities()

        # This is included in some logs to provide more context
        self.environment_info = environment_info
@@ -233,6 +227,13 @@ def __init__(self,
        self.lc_check_sync_state.clock = self.reactor
        self.lc_check_sync_state_interval = self.CHECK_SYNC_STATE_INTERVAL

+    def get_default_capabilities(self) -> list[str]:
+        return [
+            self.settings.CAPABILITY_WHITELIST,
+            self.settings.CAPABILITY_SYNC_VERSION,
+            self.settings.CAPABILITY_GET_BEST_BLOCKCHAIN
+        ]
+
    def start(self) -> None:
        """ A factory must be started only once. And it is usually automatically started.
        """
@@ -443,7 +444,7 @@ def _initialize_components_full_verification(self) -> None:
                # It's safe to skip block weight verification during initialization because
                # we trust the difficulty stored in metadata
                skip_block_weight_verification = True
-                if block_count % settings.VERIFY_WEIGHT_EVERY_N_BLOCKS == 0:
+                if block_count % self.settings.VERIFY_WEIGHT_EVERY_N_BLOCKS == 0:
                    skip_block_weight_verification = False

                try:
@@ -628,14 +629,14 @@ def _verify_soft_voided_txs(self) -> None:
            soft_voided_meta = soft_voided_tx.get_metadata()
            voided_set = soft_voided_meta.voided_by or set()
            # If the tx is not marked as soft voided, then we can't continue the initialization
-            if settings.SOFT_VOIDED_ID not in voided_set:
+            if self.settings.SOFT_VOIDED_ID not in voided_set:
                self.log.error(
                    'Error initializing node. Your database is not compatible with the current version of the'
                    ' full node. You must use the latest available snapshot or sync from the beginning.'
                )
                sys.exit(-1)

-            assert {soft_voided_id, settings.SOFT_VOIDED_ID}.issubset(voided_set)
+            assert {soft_voided_id, self.settings.SOFT_VOIDED_ID}.issubset(voided_set)

    def _verify_checkpoints(self) -> None:
        """ Method to verify if all checkpoints that exist in the database have the correct hash and are winners.
@@ -774,7 +775,7 @@ def make_block_template(self, parent_block_hash: VertexId, timestamp: Optional[i
        """
        parent_block = self.tx_storage.get_transaction(parent_block_hash)
        assert isinstance(parent_block, Block)
-        parent_txs = self.generate_parent_txs(parent_block.timestamp + settings.MAX_DISTANCE_BETWEEN_BLOCKS)
+        parent_txs = self.generate_parent_txs(parent_block.timestamp + self.settings.MAX_DISTANCE_BETWEEN_BLOCKS)
        if timestamp is None:
            current_timestamp = int(max(self.tx_storage.latest_timestamp, self.reactor.seconds()))
        else:
@@ -810,7 +811,7 @@ def _make_block_template(self, parent_block: Block, parent_txs: 'ParentTxs', cur
        timestamp_abs_min = parent_block.timestamp + 1
        # and absolute maximum limited by max time between blocks
        if not parent_block.is_genesis:
-            timestamp_abs_max = parent_block.timestamp + settings.MAX_DISTANCE_BETWEEN_BLOCKS - 1
+            timestamp_abs_max = parent_block.timestamp + self.settings.MAX_DISTANCE_BETWEEN_BLOCKS - 1
        else:
            timestamp_abs_max = 0xffffffff
        assert timestamp_abs_max > timestamp_abs_min
@@ -819,12 +820,12 @@ def _make_block_template(self, parent_block: Block, parent_txs: 'ParentTxs', cur
        timestamp_min = max(timestamp_abs_min, parent_txs.max_timestamp + 1)
        assert timestamp_min <= timestamp_abs_max
        # when we have weight decay, the max timestamp will be when the next decay happens
-        if with_weight_decay and settings.WEIGHT_DECAY_ENABLED:
+        if with_weight_decay and self.settings.WEIGHT_DECAY_ENABLED:
            # we either have passed the first decay or not, the range will vary depending on that
-            if timestamp_min > timestamp_abs_min + settings.WEIGHT_DECAY_ACTIVATE_DISTANCE:
-                timestamp_max_decay = timestamp_min + settings.WEIGHT_DECAY_WINDOW_SIZE
+            if timestamp_min > timestamp_abs_min + self.settings.WEIGHT_DECAY_ACTIVATE_DISTANCE:
+                timestamp_max_decay = timestamp_min + self.settings.WEIGHT_DECAY_WINDOW_SIZE
            else:
-                timestamp_max_decay = timestamp_abs_min + settings.WEIGHT_DECAY_ACTIVATE_DISTANCE
+                timestamp_max_decay = timestamp_abs_min + self.settings.WEIGHT_DECAY_ACTIVATE_DISTANCE
            timestamp_max = min(timestamp_abs_max, timestamp_max_decay)
        else:
            timestamp_max = timestamp_abs_max
@@ -833,7 +834,10 @@ def _make_block_template(self, parent_block: Block, parent_txs: 'ParentTxs', cur
        # this is the min weight to cause an increase of twice the WEIGHT_TOL, we make sure to generate a template with
        # at least this weight (note that the user of the API can set its own weight, the block submit API will also
        # protect against a weight that is too small but using WEIGHT_TOL instead of 2*WEIGHT_TOL)
-        min_significant_weight = calculate_min_significant_weight(parent_block_metadata.score, 2 * settings.WEIGHT_TOL)
+        min_significant_weight = calculate_min_significant_weight(
+            parent_block_metadata.score,
+            2 * self.settings.WEIGHT_TOL
+        )
        weight = max(daa.calculate_next_weight(parent_block, timestamp), min_significant_weight)
        height = parent_block.get_height() + 1
        parents = [parent_block.hash] + parent_txs.must_include
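For intuition on the `2 * WEIGHT_TOL` choice in this hunk: scores and weights are on a log2 scale, so the minimum significant weight has a closed form. A sketch of the bound being computed, assuming the accumulated score follows the usual log-sum-of-work definition (the real `calculate_min_significant_weight` lives elsewhere in hathor-core):

import math

def min_significant_weight(score: float, tol: float) -> float:
    # Smallest w with log2(2**score + 2**w) >= score + tol, i.e. the block
    # moves the accumulated score by at least tol:
    #   2**w >= 2**(score + tol) - 2**score  =>  w >= score + log2(2**tol - 1)
    return score + math.log2(2 ** tol - 1)

# make_block_template() asks for 2 * WEIGHT_TOL of headroom, while
# submit_block() below only rejects weights under the 1 * WEIGHT_TOL bound.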
@@ -898,15 +902,21 @@ def submit_block(self, blk: Block, fails_silently: bool = True) -> bool:
        parent_block = self.tx_storage.get_transaction(parent_hash)
        parent_block_metadata = parent_block.get_metadata()
        # this is the smallest weight that won't cause the score to increase, anything equal or smaller is bad
-        min_insignificant_weight = calculate_min_significant_weight(parent_block_metadata.score, settings.WEIGHT_TOL)
+        min_insignificant_weight = calculate_min_significant_weight(
+            parent_block_metadata.score,
+            self.settings.WEIGHT_TOL
+        )
        if blk.weight <= min_insignificant_weight:
            self.log.warn('submit_block(): insignificant weight? accepted anyway', blk=blk.hash_hex, weight=blk.weight)
        return self.propagate_tx(blk, fails_silently=fails_silently)

    def push_tx(self, tx: Transaction, allow_non_standard_script: bool = False,
-                max_output_script_size: int = settings.PUSHTX_MAX_OUTPUT_SCRIPT_SIZE) -> None:
+                max_output_script_size: int | None = None) -> None:
        """Used by all APIs that accept a new transaction (like push_tx)
        """
+        if max_output_script_size is None:
+            max_output_script_size = self.settings.PUSHTX_MAX_OUTPUT_SCRIPT_SIZE
+
        is_double_spending = tx.is_double_spending()
        if is_double_spending:
            raise DoubleSpendingError('Invalid transaction. At least one of your inputs has already been spent.')
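The `push_tx` signature change is forced by how Python evaluates default arguments: the old default baked `settings.PUSHTX_MAX_OUTPUT_SCRIPT_SIZE` into the function object when the module was imported, before any injected settings existed. The `None` sentinel defers the lookup to call time. A self-contained illustration of the difference (all names here are hypothetical):

class Config:
    MAX_SIZE = 100

config = Config()

def stale_default(max_size: int = config.MAX_SIZE) -> int:
    return max_size  # default frozen at function-definition time

def live_default(max_size: int | None = None) -> int:
    if max_size is None:
        max_size = config.MAX_SIZE  # looked up at call time
    return max_size

config.MAX_SIZE = 200
assert stale_default() == 100  # still the import-time value
assert live_default() == 200   # reflects the current config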
@@ -968,7 +978,7 @@ def on_new_tx(self, tx: BaseTransaction, *, conn: Optional[HathorProtocol] = Non
            self.tx_storage.compare_bytes_with_local_tx(tx)
            already_exists = True

-        if tx.timestamp - self.reactor.seconds() > settings.MAX_FUTURE_TIMESTAMP_ALLOWED:
+        if tx.timestamp - self.reactor.seconds() > self.settings.MAX_FUTURE_TIMESTAMP_ALLOWED:
            if not fails_silently:
                raise InvalidNewTransaction('Ignoring transaction in the future {} (timestamp={})'.format(
                    tx.hash_hex, tx.timestamp))
@@ -1117,7 +1127,7 @@ def tx_fully_validated(self, tx: BaseTransaction, *, quiet: bool) -> None:

    def _log_feature_states(self, vertex: BaseTransaction) -> None:
        """Log feature states for a block. Used as part of the Feature Activation Phased Testing."""
-        if not settings.FEATURE_ACTIVATION.enable_usage or not isinstance(vertex, Block):
+        if not self.settings.FEATURE_ACTIVATION.enable_usage or not isinstance(vertex, Block):
            return

        feature_descriptions = self._feature_service.get_bits_description(block=vertex)
@@ -1153,10 +1163,10 @@ def listen(self, description: str, use_ssl: Optional[bool] = None) -> None:
            self.my_peer.entrypoints.append(address)

    def has_sync_version_capability(self) -> bool:
-        return settings.CAPABILITY_SYNC_VERSION in self.capabilities
+        return self.settings.CAPABILITY_SYNC_VERSION in self.capabilities

    def add_peer_to_whitelist(self, peer_id):
-        if not settings.ENABLE_PEER_WHITELIST:
+        if not self.settings.ENABLE_PEER_WHITELIST:
            return

        if peer_id in self.peers_whitelist:
@@ -1165,7 +1175,7 @@ def add_peer_to_whitelist(self, peer_id):
        self.peers_whitelist.append(peer_id)

    def remove_peer_from_whitelist_and_disconnect(self, peer_id: str) -> None:
-        if not settings.ENABLE_PEER_WHITELIST:
+        if not self.settings.ENABLE_PEER_WHITELIST:
            return

        if peer_id in self.peers_whitelist:
@@ -1179,7 +1189,9 @@ def has_recent_activity(self) -> bool:

        # We use the avg time between blocks as a basis to know how much time we should use to consider the fullnode
        # as not synced.
-        maximum_timestamp_delta = settings.P2P_RECENT_ACTIVITY_THRESHOLD_MULTIPLIER * settings.AVG_TIME_BETWEEN_BLOCKS
+        maximum_timestamp_delta = (
+            self.settings.P2P_RECENT_ACTIVITY_THRESHOLD_MULTIPLIER * self.settings.AVG_TIME_BETWEEN_BLOCKS
+        )

        if current_timestamp - latest_blockchain_timestamp > maximum_timestamp_delta:
            return False
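To make the threshold in this last hunk concrete: with placeholder values of 30 seconds between blocks and a multiplier of 3 (illustrative numbers, not taken from this diff), the node stops reporting recent activity once the best block is more than 90 seconds old:

# Placeholder values; the real ones come from the injected HathorSettings.
AVG_TIME_BETWEEN_BLOCKS = 30                   # seconds
P2P_RECENT_ACTIVITY_THRESHOLD_MULTIPLIER = 3

maximum_timestamp_delta = (
    P2P_RECENT_ACTIVITY_THRESHOLD_MULTIPLIER * AVG_TIME_BETWEEN_BLOCKS
)
assert maximum_timestamp_delta == 90  # older best block => has_recent_activity() is False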