Downgraded severity of some messages from debug to trace #592

Merged · 1 commit · Mar 27, 2018
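Python's standard logging module has no trace level, so the `logger.trace(...)` calls introduced below come from plenum's own logging setup rather than the standard library. For orientation, a minimal sketch of how a below-DEBUG TRACE level is typically registered; the names and wiring here are illustrative assumptions, not plenum's actual code:

```python
import logging

# DEBUG is 10; registering a lower value means trace output is dropped
# unless a logger is explicitly configured for it.
TRACE = 5
logging.addLevelName(TRACE, "TRACE")

def trace(self, msg, *args, **kwargs):
    # Follow the stdlib convention: check the level before doing any work.
    if self.isEnabledFor(TRACE):
        self._log(TRACE, msg, args, **kwargs)

logging.Logger.trace = trace

logging.basicConfig(level=TRACE)
logging.getLogger(__name__).trace("emitted only at TRACE verbosity or below")
```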
2 changes: 1 addition & 1 deletion plenum/common/batched.py
@@ -97,7 +97,7 @@ def flushOutBoxes(self) -> None:
continue
if msgs:
if self._should_batch(msgs):
- logger.debug(
+ logger.trace(
"{} batching {} msgs to {} into fewer transmissions".
format(self, len(msgs), dest))
logger.trace(" messages: {}".format(msgs))
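A side note on cost: `str.format` in these calls runs even when trace output is filtered out, and the second message above serializes the entire `msgs` list. An illustrative sketch of the usual stdlib mitigations (deferred %-style arguments plus an explicit level guard); background only, not something this PR changes:

```python
import logging

TRACE = 5  # assumes a custom TRACE level registered as in the earlier sketch
logger = logging.getLogger(__name__)

def log_batch(node, msgs, dest):
    # The isEnabledFor() guard skips building the arguments entirely, and
    # %-style args are only interpolated if the record is actually emitted.
    if logger.isEnabledFor(TRACE):
        logger.log(TRACE, "%s batching %d msgs to %s", node, len(msgs), dest)
        logger.log(TRACE, "messages: %r", msgs)
```

Usage: `log_batch("Node1", ["m1", "m2"], "Node2")` produces output only when the effective level is TRACE or lower.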
2 changes: 1 addition & 1 deletion plenum/server/has_action_queue.py
@@ -116,7 +116,7 @@ def wrapper():
self.repeatingActions.add(action)
self._schedule(wrapper, seconds)
else:
- logger.debug('{} is already repeating'.format(
+ logger.trace('{} is already repeating'.format(
get_func_name(action)))

def stopRepeating(self, action: Callable, strict=True):
16 changes: 8 additions & 8 deletions plenum/server/node.py
@@ -1501,7 +1501,7 @@ def handleOneNodeMsg(self, wrappedMsg):
try:
vmsg = self.validateNodeMsg(wrappedMsg)
if vmsg:
logger.debug("{} msg validated {}".format(self, wrappedMsg),
logger.trace("{} msg validated {}".format(self, wrappedMsg),
extra={"tags": ["node-msg-validation"]})
self.unpackNodeMsg(*vmsg)
else:
@@ -1538,7 +1538,7 @@ def validateNodeMsg(self, wrappedMsg):
self.verifySignature(message)
except BaseExc as ex:
raise SuspiciousNode(frm, ex, message) from ex
logger.debug("{} received node message from {}: {}".
logger.trace("{} received node message from {}: {}".
format(self, frm, message),
extra={"cli": False})
return message, frm
@@ -1555,7 +1555,7 @@ def unpackNodeMsg(self, msg, frm) -> None:
# a transport, it should be encapsulated.

if isinstance(msg, Batch):
logger.debug("{} processing a batch {}".format(self, msg))
logger.trace("{} processing a batch {}".format(self, msg))
for m in msg.messages:
m = self.nodestack.deserializeMsg(m)
self.handleOneNodeMsg((m, frm))
@@ -1764,8 +1764,8 @@ def preLedgerCatchUp(self, ledger_id):

# revert uncommitted txns and state for unordered requests
r = self.master_replica.revert_unordered_batches()
- logger.info('{} reverted {} batches before starting catch up for '
- 'ledger {}'.format(self, r, ledger_id))
+ logger.debug('{} reverted {} batches before starting catch up for '
+ 'ledger {}'.format(self, r, ledger_id))

def postTxnFromCatchupAddedToLedger(self, ledger_id: int, txn: Any):
rh = self.postRecvTxnFromCatchup(ledger_id, txn)
@@ -2286,12 +2286,12 @@ def checkPerformance(self) -> Optional[bool]:
self.view_changer.on_master_degradation()
return False
else:
logger.debug("{}'s master has higher performance than backups".
logger.trace("{}'s master has higher performance than backups".
format(self))
return True

def checkNodeRequestSpike(self):
logger.debug("{} checking its request amount".format(self))
logger.trace("{} checking its request amount".format(self))

if not self.isParticipating:
return
@@ -2640,7 +2640,7 @@ def sendReplyToClient(self, reply, reqKey):
if self.isProcessingReq(*reqKey):
sender = self.requestSender[reqKey]
if sender:
- logger.debug(
+ logger.trace(
'{} sending reply for {} to client'.format(
self, reqKey))
self.transmitToClient(reply, self.requestSender[reqKey])
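Several calls in this file pass `extra={...}` (e.g. `extra={"tags": ["node-msg-validation"]}`). In stdlib logging, `extra` attaches custom attributes to the LogRecord so filters and formatters can act on them; the tag convention itself is plenum's. A small illustrative sketch of the underlying mechanism only:

```python
import logging

class TagFilter(logging.Filter):
    """Pass only records whose 'tags' attribute contains the given tag."""
    def __init__(self, tag):
        super().__init__()
        self.tag = tag

    def filter(self, record):
        # Attributes supplied via extra={...} appear directly on the record.
        return self.tag in getattr(record, "tags", [])

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.addFilter(TagFilter("node-msg-validation"))

logger.debug("msg validated", extra={"tags": ["node-msg-validation"]})  # shown
logger.debug("unrelated message", extra={"tags": ["other"]})  # dropped
```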
2 changes: 1 addition & 1 deletion plenum/server/notifier_plugin_manager.py
@@ -66,7 +66,7 @@ def sendMessageUponSuspiciousSpike(
assert 'enabled' in config

if not (enabled and config['enabled']):
- logger.debug('Suspicious Spike check is disabled')
+ logger.trace('Suspicious Spike check is disabled')
return None

coefficient = config['coefficient']
2 changes: 1 addition & 1 deletion plenum/server/propagator.py
@@ -273,7 +273,7 @@ def tryForwarding(self, request: Request):
# to move ahead
self.forward(request)
else:
logger.debug("{} not forwarding request {} to its replicas "
logger.trace("{} not forwarding request {} to its replicas "
"since {}".format(self, request, cannot_reason_msg))

def request_propagates(self, req_keys):
26 changes: 13 additions & 13 deletions plenum/server/replica.py
@@ -650,7 +650,7 @@ def trackBatches(self, pp: PrePrepare, prevStateRootHash):
# pp.discarded indicates the index from where the discarded requests
# starts hence the count of accepted requests, prevStateRoot is
# tracked to revert this PRE-PREPARE
- self.logger.debug('{} tracking batch for {} with state root {}'.format(
+ self.logger.trace('{} tracking batch for {} with state root {}'.format(
self, pp, prevStateRootHash))
self.batches[(pp.viewNo, pp.ppSeqNo)] = [pp.ledgerId, pp.discarded,
pp.ppTime, prevStateRootHash]
@@ -745,7 +745,7 @@ def create3PCBatch(self, ledger_id):
rv = self.execute_hook(ReplicaHooks.CREATE_PPR, pre_prepare)
pre_prepare = rv if rv is not None else pre_prepare

- self.logger.debug('{} created a PRE-PREPARE with {} requests for ledger {}'.format(
+ self.logger.trace('{} created a PRE-PREPARE with {} requests for ledger {}'.format(
self, len(validReqs), ledger_id))

Review thread on this line:

Contributor: Do we have other DEBUG level entries where we log creation of a PRE-PREPARE by a Primary?

Contributor (author): We at least log sends for every message; is that enough?

Contributor: Yes, absolutely.
self.lastPrePrepareSeqNo = ppSeqNo
self.last_accepted_pre_prepare_time = tm
@@ -876,7 +876,7 @@ def _process_valid_preprepare(self, pre_prepare, sender):
self.node.onBatchCreated(pre_prepare.ledgerId, state_root)
# BLS multi-sig:
self._bls_bft_replica.process_pre_prepare(pre_prepare, sender)
self.logger.debug("{} saved shared multi signature for root".format(self, old_state_root))
self.logger.trace("{} saved shared multi signature for root".format(self, old_state_root))
self.trackBatches(pre_prepare, old_state_root)
self.logger.debug("{} processed incoming PRE-PREPARE{}".format(self, key),
extra={"tags": ["processing"]})
@@ -994,7 +994,7 @@ def processPrepare(self, prepare: Prepare, sender: str) -> None:
else:
# TODO let's have isValidPrepare throw an exception that gets
# handled and possibly logged higher
self.logger.debug("{} cannot process incoming PREPARE".format(self))
self.logger.trace("{} cannot process incoming PREPARE".format(self))
except SuspiciousNode as ex:
self.node.reportSuspiciousNodeEx(ex)

@@ -1760,7 +1760,7 @@ def markCheckPointStable(self, seqNo):
return
self.h = seqNo
for k in previousCheckpoints:
self.logger.debug("{} removing previous checkpoint {}".format(self, k))
self.logger.trace("{} removing previous checkpoint {}".format(self, k))
self.checkpoints.pop(k)
self._gc((self.viewNo, seqNo))
self.logger.debug("{} marked stable checkpoint {}".format(self, (s, e)))
@@ -1821,7 +1821,7 @@ def processStashedCheckpoints(self, key):
self._clear_prev_view_stashed_checkpoints()

if key not in self.stashedRecvdCheckpoints.get(self.viewNo, {}):
self.logger.debug("{} have no stashed checkpoints for {}")
self.logger.trace("{} have no stashed checkpoints for {}")
return 0

stashed = self.stashedRecvdCheckpoints[self.viewNo][key]
@@ -1861,9 +1861,9 @@ def _gc(self, till3PCKey):
for reqKey in pp.reqIdr:
reqKeys.add(reqKey)

self.logger.debug("{} found {} 3-phase keys to clean".
self.logger.trace("{} found {} 3-phase keys to clean".
format(self, len(tpcKeys)))
self.logger.debug("{} found {} request keys to clean".
self.logger.trace("{} found {} request keys to clean".
format(self, len(reqKeys)))

to_clean_up = (
@@ -1882,7 +1882,7 @@ def _gc(self, till3PCKey):

for request_key in reqKeys:
self.requests.free(request_key)
- self.logger.debug('{} freed request {} from previous checkpoints'
+ self.logger.trace('{} freed request {} from previous checkpoints'
.format(self, request_key))

self.compact_ordered()
@@ -2139,7 +2139,7 @@ def _request_three_phase_msg(self, three_pc_key: Tuple[int, int],
return False

# TODO: Using a timer to retry would be a better thing to do
- self.logger.debug('{} requesting {} for {} from {}'.format(
+ self.logger.trace('{} requesting {} for {} from {}'.format(
self, msg_type, three_pc_key, recipients))
# An optimisation can be to request PRE-PREPARE from f+1 or
# f+x (f+x<2f) nodes only rather than 2f since only 1 correct
@@ -2344,7 +2344,7 @@ def send(self, msg, stat=None) -> None:
:param rid: remote id of one recipient (sends to all recipients if None)
:param msg: the message to send
"""
self.logger.debug("{} sending {}".format(self, msg.__class__.__name__),
self.logger.trace("{} sending {}".format(self, msg.__class__.__name__),
extra={"cli": True, "tags": ['sending']})
self.logger.trace("{} sending {}".format(self, msg))
if stat:
@@ -2393,7 +2393,7 @@ def _remove_till_caught_up_3pc(self, last_caught_up_3PC):
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp

- self.logger.debug('{} going to remove messages for {} 3PC keys'.format(
+ self.logger.trace('{} going to remove messages for {} 3PC keys'.format(
self, len(outdated_pre_prepares)))

for key, pp in outdated_pre_prepares.items():
Expand All @@ -2417,7 +2417,7 @@ def _remove_ordered_from_queue(self, last_caught_up_3PC=None):
compare_3PC_keys((msg.viewNo, msg.ppSeqNo), last_caught_up_3PC) >= 0):
to_remove.append(i)

- self.logger.debug('{} going to remove {} Ordered messages from outbox'.format(self, len(to_remove)))
+ self.logger.trace('{} going to remove {} Ordered messages from outbox'.format(self, len(to_remove)))

# Removing Ordered from queue but returning `Ordered` in order that
# they should be processed.
2 changes: 1 addition & 1 deletion stp_zmq/kit_zstack.py
@@ -46,7 +46,7 @@ def maintainConnections(self, force=False):
else self.config.RETRY_TIMEOUT_RESTRICTED)
missing = self.connectToMissing()
self.retryDisconnected(exclude=missing)
logger.debug("{} next check for retries in {:.2f} seconds"
logger.trace("{} next check for retries in {:.2f} seconds"
.format(self, self.nextCheck - now))
return True

6 changes: 3 additions & 3 deletions stp_zmq/zstack.py
@@ -654,13 +654,13 @@ def sendPingPong(self, remote: Union[str, Remote], is_ping=True):
def handlePingPong(self, msg, frm, ident):
if msg in (self.pingMessage, self.pongMessage):
if msg == self.pingMessage:
- logger.debug('{} got ping from {}'.format(self, frm))
+ logger.trace('{} got ping from {}'.format(self, frm))
self.sendPingPong(frm, is_ping=False)

if msg == self.pongMessage:
if ident in self.remotesByKeys:
self.remotesByKeys[ident].setConnected()
- logger.debug('{} got pong from {}'.format(self, frm))
+ logger.trace('{} got pong from {}'.format(self, frm))
return True
return False

@@ -713,7 +713,7 @@ def transmit(self, msg, uid, timeout=None, serialized=False):
msg = self.prepare_to_send(msg)
# socket.send(self.signedMsg(msg), flags=zmq.NOBLOCK)
socket.send(msg, flags=zmq.NOBLOCK)
- logger.debug('{} transmitting message {} to {}'
+ logger.trace('{} transmitting message {} to {}'
.format(self, msg, uid))
if not remote.isConnected and msg not in self.healthMessages:
logger.debug('Remote {} is not connected - '
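With the per-message chatter moved down to trace, DEBUG logs stay readable while full detail remains one knob away. A sketch of opting back in for a single component when diagnosing connectivity; the level constant and logger name are assumptions, since the real names live in plenum's logging setup:

```python
import logging

TRACE = 5  # the custom level assumed in the first sketch
logging.addLevelName(TRACE, "TRACE")
logging.basicConfig(level=logging.DEBUG)

# Re-enable per-message transmit/ping logs for the ZMQ stack only,
# leaving every other logger at DEBUG.
logging.getLogger("stp_zmq.zstack").setLevel(TRACE)
```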