@@ -22,6 +22,7 @@
 from hathor.profiler import get_cpu_profiler
 from hathor.transaction import BaseTransaction, Block, TxInput, TxOutput, TxVersion
 from hathor.transaction.base_transaction import TX_HASH_SIZE
+from hathor.transaction.exceptions import InvalidToken
 from hathor.transaction.util import VerboseCallback, unpack, unpack_len
 from hathor.types import TokenUid, VertexId
 from hathor.util import not_none
@@ -278,7 +279,16 @@ def verify_checkpoint(self, checkpoints: list[Checkpoint]) -> None:
         raise InvalidNewTransaction(f'Invalid new transaction {self.hash_hex}: expected to reach a checkpoint but '
                                     'none of its children is checkpoint-valid')
 
-    def get_token_info_from_inputs(self) -> dict[TokenUid, TokenInfo]:
+    def get_complete_token_info(self) -> dict[TokenUid, TokenInfo]:
+        """
+        Get a complete token info dict, including data from both inputs and outputs.
+        """
+        token_dict = self._get_token_info_from_inputs()
+        self._update_token_info_from_outputs(token_dict=token_dict)
+
+        return token_dict
+
+    def _get_token_info_from_inputs(self) -> dict[TokenUid, TokenInfo]:
         """Sum up all tokens present in the inputs and their properties (amount, can_mint, can_melt)
         """
         token_dict: dict[TokenUid, TokenInfo] = {}
@@ -305,6 +315,37 @@ def get_token_info_from_inputs(self) -> dict[TokenUid, TokenInfo]:
 
         return token_dict
 
+    def _update_token_info_from_outputs(self, *, token_dict: dict[TokenUid, TokenInfo]) -> None:
+        """Iterate over the outputs and add values to token info dict. Updates the dict in-place.
+
+        Also, checks if no token has authorities on the outputs not present on the inputs
+
+        :raises InvalidToken: when there's an error in token operations
+        """
+        # iterate over outputs and add values to token_dict
+        for index, tx_output in enumerate(self.outputs):
+            token_uid = self.get_token_uid(tx_output.get_token_index())
+            token_info = token_dict.get(token_uid)
+            if token_info is None:
+                raise InvalidToken('no inputs for token {}'.format(token_uid.hex()))
+            else:
+                # for authority outputs, make sure the same capability (mint/melt) was present in the inputs
+                if tx_output.can_mint_token() and not token_info.can_mint:
+                    raise InvalidToken('output has mint authority, but no input has it: {}'.format(
+                        tx_output.to_human_readable()))
+                if tx_output.can_melt_token() and not token_info.can_melt:
+                    raise InvalidToken('output has melt authority, but no input has it: {}'.format(
+                        tx_output.to_human_readable()))
+
+                if tx_output.is_token_authority():
+                    # make sure we only have authorities that we know of
+                    if tx_output.value > TxOutput.ALL_AUTHORITIES:
+                        raise InvalidToken('Invalid authorities in output (0b{0:b})'.format(tx_output.value))
+                else:
+                    # for regular outputs, just subtract from the total amount
+                    sum_tokens = token_info.amount + tx_output.value
+                    token_dict[token_uid] = TokenInfo(sum_tokens, token_info.can_mint, token_info.can_melt)
+
     def iter_spent_rewards(self) -> Iterator[Block]:
         """Iterate over all the rewards being spent, assumes tx has been verified."""
         for input_tx in self.inputs:
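For readers skimming the diff, the sketch below shows how the new public entry point might be called from verification code. It is a minimal sketch under stated assumptions: `check_token_balances` and the standalone call pattern are hypothetical and not part of this change; only `get_complete_token_info`, `TokenInfo`, and `InvalidToken` come from the diff above.

```python
# Minimal sketch, assuming a fully loaded Transaction instance `tx`.
# `check_token_balances` is a hypothetical helper, not part of this PR.
from hathor.transaction import Transaction
from hathor.transaction.exceptions import InvalidToken


def check_token_balances(tx: Transaction) -> None:
    try:
        # Replaces a direct call to the old get_token_info_from_inputs():
        # inputs are summed first, then outputs are applied and authority
        # checks run, all behind one public method.
        token_dict = tx.get_complete_token_info()
    except InvalidToken:
        # Raised when an output references a token with no matching input,
        # or carries a mint/melt authority that the inputs do not grant.
        raise
    for token_uid, token_info in token_dict.items():
        print(token_uid.hex(), token_info.amount, token_info.can_mint, token_info.can_melt)
```

Splitting the old method into `_get_token_info_from_inputs` and `_update_token_info_from_outputs` keeps the two passes private, while `get_complete_token_info` becomes the single call site for both balance accumulation and authority validation.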