From c7a34717c24c4efa937ed6443ddc99db935de0af Mon Sep 17 00:00:00 2001
From: Andrey Kononykhin
Date: Wed, 23 Aug 2017 16:45:23 +0300
Subject: [PATCH 1/5] modified rank routine, added test cases for primary
 demotion

---
 plenum/server/node.py                         |  2 +-
 plenum/server/pool_manager.py                 | 88 +++++++++++++------
 plenum/server/primary_decider.py              |  7 +-
 plenum/server/primary_selector.py             | 17 +++-
 plenum/test/primary_selection/conftest.py     | 26 +++++-
 plenum/test/primary_selection/helper.py       | 18 +++-
 ...after_primary_demotion_and_pool_restart.py | 59 +++++++++++++
 ...after_primary_demotion_and_view_changes.py | 50 +++++++++++
 .../test_new_node_joins_after_view_change.py  |  3 +
 9 files changed, 234 insertions(+), 36 deletions(-)
 create mode 100644 plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
 create mode 100644 plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py

diff --git a/plenum/server/node.py b/plenum/server/node.py
index c52584710f..5543ec4138 100644
--- a/plenum/server/node.py
+++ b/plenum/server/node.py
@@ -622,7 +622,7 @@ def start(self, loop):
         self.logNodeInfo()
 
     @property
-    def rank(self) -> int:
+    def rank(self) -> Optional[int]:
         return self.poolManager.rank
 
     def get_name_by_rank(self, rank):
diff --git a/plenum/server/pool_manager.py b/plenum/server/pool_manager.py
index 289339c55c..0489e09969 100644
--- a/plenum/server/pool_manager.py
+++ b/plenum/server/pool_manager.py
@@ -54,7 +54,10 @@ def _get_rank(needle_id: str, haystack_ids: List[str]):
         # Return the rank of the node where rank is defined by the order in
         # which node was added to the pool or on the alphabetical order of name
         # if using RegistryPoolManager
-        return haystack_ids.index(needle_id)
+        try:
+            return haystack_ids.index(needle_id)
+        except ValueError:
+            return None
 
     @property
     @abstractmethod
@@ -63,25 +66,30 @@ def id(self):
         """
 
     @abstractmethod
-    def get_rank_of(self, node_id) -> int:
-        """
+    def get_rank_of(self, node_id) -> Optional[int]:
+        """Return node rank among active pool validators by id
+
+        :param node_id: node's id
+        :return: rank of the node or None if not found
         """
 
     @property
     def rank(self) -> Optional[int]:
         # Nodes have a total order defined in them, rank is the node's
         # position in that order
-        if self._rank is None:
-            self._rank = self.get_rank_of(self.id)
-        return self._rank
+        return self.get_rank_of(self.id)
 
     @abstractmethod
-    def get_name_by_rank(self, rank):
+    def get_name_by_rank(self, rank) -> Optional[str]:
         # Needed for communicating primary name to others and also nodeReg
         # uses node names (alias) and not ids
         # TODO: Should move to using node ids and not node names (alias)
+        """Return node name (alias) by rank among active pool validators
+
+        :param rank: rank of the node
+        :return: name of the node or None if not found
         """
-        """
+
 
 
 class HasPoolManager:
@@ -105,7 +113,8 @@ def __init__(self, node, ha=None, cliname=None, cliha=None):
         self.basedirpath = node.basedirpath
         self._ledger = None
         self._id = None
-        self._rank = None
+        self._ordered_node_ids = None
+
         TxnStackManager.__init__(
             self, self.name, self.basedirpath, isNode=True)
         self.state = self.loadState()
@@ -114,6 +123,9 @@ def __init__(self, node, ha=None, cliname=None, cliha=None):
         self.nstack, self.cstack, self.nodeReg, self.cliNodeReg = \
             self.getStackParamsAndNodeReg(self.name, self.basedirpath, ha=ha,
                                           cliname=cliname, cliha=cliha)
+        # order nodes initially by ledger
+        _ = self.node_ids_ordered_by_rank
+
         self._dataFieldsValidators = (
             (NODE_IP, self._isIpAddressValid),
             (CLIENT_IP, self._isIpAddressValid),
@@ -207,6 +219,8 @@ def onPoolMembershipChange(self, txn):
         nodeName = txn[DATA][ALIAS]
         nodeNym = txn[TARGET_NYM]
 
+        self._order_node(nodeNym, nodeName)
+
         def _updateNode(txn):
             if {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT}. \
                     intersection(set(txn[DATA].keys())):
@@ -369,38 +383,56 @@ def id(self):
                     self._id = txn[TARGET_NYM]
         return self._id
 
+
+    def _order_node(self, nodeNym, nodeName):
+        assert self._ordered_node_ids.get(nodeNym) in (nodeName, None), (
+                "{} trying to order already ordered node {} ({}) "
+                "with other alias {}".format(
+                    self.name, self._ordered_node_ids.get(nodeNym), nodeNym, nodeName
+                )
+        )
+
+        self._ordered_node_ids[nodeNym] = nodeName
+
     @property
-    def node_ids_in_ordered_by_rank(self) -> List:
-        ids = OrderedDict()
-        for _, txn in self.ledger.getAllTxn():
-            ids[txn[TARGET_NYM]] = True
-        return list(ids.keys())
+    def node_ids_ordered_by_rank(self) -> List:
+        if self._ordered_node_ids is None:
+            self._ordered_node_ids = OrderedDict()
+            for _, txn in self.ledger.getAllTxn():
+                if txn[TXN_TYPE] == NODE:
+                    self._order_node(txn[TARGET_NYM], txn[DATA][ALIAS])
+
+        return [nym for nym, name in self._ordered_node_ids.items()
+                if name in self.nodeReg]
 
     def get_rank_of(self, node_id) -> Optional[int]:
         if self.id is None:
             # This can happen if a non-genesis node starts
             return None
-        return self._get_rank(node_id, self.node_ids_in_ordered_by_rank)
+        return self._get_rank(node_id, self.node_ids_ordered_by_rank)
 
-    def get_name_by_rank(self, rank):
-        # This is expensive but only required while start or view change
-        id = self.node_ids_in_ordered_by_rank[rank]
-        # We don't allow changing ALIAS
-        for _, txn in self.ledger.getAllTxn():
-            if txn[TARGET_NYM] == id and DATA in txn and ALIAS in txn[DATA]:
-                return txn[DATA][ALIAS]
+    def get_name_by_rank(self, rank) -> Optional[str]:
+        try:
+            nym = self.node_ids_ordered_by_rank[rank]
+        except IndexError:
+            return None
+        else:
+            return self._ordered_node_ids[nym]
 
 
 class RegistryPoolManager(PoolManager):
     # This is the old way of managing the pool nodes information and
     # should be deprecated.
     def __init__(self, name, basedirpath, nodeRegistry, ha, cliname, cliha):
-        self._rank = None
+        self._ordered_node_names = None
+
         self.nstack, self.cstack, self.nodeReg, self.cliNodeReg = \
             self.getStackParamsAndNodeReg(name=name, basedirpath=basedirpath,
                                           nodeRegistry=nodeRegistry, ha=ha,
                                           cliname=cliname, cliha=cliha)
 
+        _ = self.node_names_ordered_by_rank
+
     def getStackParamsAndNodeReg(self, name, basedirpath, nodeRegistry=None,
                                  ha=None, cliname=None, cliha=None):
         nstack, nodeReg, cliNodeReg = self.getNodeStackParams(name,
@@ -491,8 +523,12 @@ def id(self):
     def node_names_ordered_by_rank(self) -> List:
         return sorted(self.nodeReg.keys())
 
-    def get_rank_of(self, node_id) -> int:
+    def get_rank_of(self, node_id) -> Optional[int]:
+        #TODO node_id here has got another meaning
         return self._get_rank(node_id, self.node_names_ordered_by_rank)
 
-    def get_name_by_rank(self, rank):
-        return self.node_names_ordered_by_rank[rank]
+    def get_name_by_rank(self, rank) -> Optional[str]:
+        try:
+            return self.node_names_ordered_by_rank[rank]
+        except IndexError:
+            return None
diff --git a/plenum/server/primary_decider.py b/plenum/server/primary_decider.py
index 801ad7a026..8540dd5ef3 100644
--- a/plenum/server/primary_decider.py
+++ b/plenum/server/primary_decider.py
@@ -1,4 +1,4 @@
-from typing import Iterable
+from typing import Iterable, Optional
 from collections import deque
 
 from plenum.common.constants import VIEW_CHANGE_PREFIX
@@ -23,7 +23,6 @@ def __init__(self, node):
         self.f = node.f
         self.replicas = node.replicas
         self.viewNo = node.viewNo
-        self.rank = node.rank
         self.nodeNames = node.allNodeNames
         self.nodeCount = 0
         self.inBox = deque()
@@ -40,6 +39,10 @@ def __init__(self, node):
     def __repr__(self):
         return "{}".format(self.name)
 
+    @property
+    def rank(self) -> Optional[int]:
+        return self.node.rank
+
     @property
     def was_master_primary_in_prev_view(self):
         return self.previous_master_primary == self.name
diff --git a/plenum/server/primary_selector.py b/plenum/server/primary_selector.py
index 5e7d588916..f820f388a3 100644
--- a/plenum/server/primary_selector.py
+++ b/plenum/server/primary_selector.py
@@ -204,8 +204,9 @@ def has_view_change_from_primary(self) -> bool:
             if next_primary_name not in self._view_change_done:
                 logger.debug(
                     "{} has not received ViewChangeDone from the next "
-                    "primary {}". format(
-                        self.name, next_primary_name))
+                    "primary {} (viewNo: {}, totalNodes: {})".format(
+                        self.name, next_primary_name,
+                        self.viewNo, self.node.totalNodes))
                 return False
             else:
                 self._has_view_change_from_primary = True
@@ -316,8 +317,16 @@ def _get_primary_id(self, view_no, instance_id):
         return (view_no + instance_id) % self.node.totalNodes
 
     def next_primary_node_name(self, instance_id):
-        return self.node.get_name_by_rank(self._get_primary_id(
-            self.viewNo, instance_id))
+        rank = self._get_primary_id(self.viewNo, instance_id)
+        name = self.node.get_name_by_rank(rank)
+
+        assert name, ("{} failed to get node name for rank {}: "
+                      "view_no {}, instance_id {}, totalNodes {}".format(
+                          self, rank, self.viewNo, instance_id,
+                          self.node.totalNodes)
+                      )
+
+        return name
 
     def next_primary_replica_name(self, instance_id):
         """
diff --git a/plenum/test/primary_selection/conftest.py b/plenum/test/primary_selection/conftest.py
index 43c832c8f9..36455f005f 100644
--- a/plenum/test/primary_selection/conftest.py
+++ b/plenum/test/primary_selection/conftest.py
@@ -1,10 +1,12 @@
 import pytest
 
 from plenum.test.node_catchup.helper import waitNodeDataEquality
-from plenum.test.primary_selection.helper import check_newly_added_nodes
+from plenum.test.primary_selection.helper import check_newly_added_nodes, \
+    getPrimaryNodesIdxs
 from plenum.test.pool_transactions.conftest import clientAndWallet1, \
     client1, wallet1, client1Connected, looper, nodeThetaAdded, \
     stewardAndWallet1, steward1, stewardWallet
+from plenum.test.pool_transactions.helper import buildPoolClientAndWallet
 
 
 @pytest.fixture(scope="module")
@@ -15,3 +17,25 @@ def one_node_added(looper, txnPoolNodeSet, nodeThetaAdded):
     waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
     check_newly_added_nodes(looper, txnPoolNodeSet, [new_node])
     return new_node
+
+
+@pytest.fixture(scope="module")
+def txnPoolMasterNodes(txnPoolNodeSet):
+    primariesIdxs = getPrimaryNodesIdxs(txnPoolNodeSet)
+    return txnPoolNodeSet[primariesIdxs[0]], txnPoolNodeSet[primariesIdxs[1]]
+
+
+@pytest.fixture(scope="module")
+def stewardAndWalletForMasterNode(looper, poolTxnData, poolTxnStewardNames,
+                                  tdirWithPoolTxns, txnPoolNodeSet,
+                                  txnPoolMasterNodes):
+    primariesIdxs = getPrimaryNodesIdxs(txnPoolNodeSet)
+    master_node = txnPoolMasterNodes[0]
+    stewardName = poolTxnStewardNames[primariesIdxs[0]]
+    stewardsSeed = poolTxnData["seeds"][stewardName].encode()
+
+    stewardClient, stewardWallet = buildPoolClientAndWallet(
+        (stewardName, stewardsSeed), tdirWithPoolTxns)
+    looper.add(stewardClient)
+    looper.run(stewardClient.ensureConnectedToNodes())
+
+    return stewardClient, stewardWallet
diff --git a/plenum/test/primary_selection/helper.py b/plenum/test/primary_selection/helper.py
index 729aad3918..64d55a26ae 100644
--- a/plenum/test/primary_selection/helper.py
+++ b/plenum/test/primary_selection/helper.py
@@ -1,5 +1,7 @@
+from typing import Sequence, List
+
 from plenum.server.pool_manager import RegistryPoolManager, TxnPoolManager
-from plenum.test.test_node import checkProtocolInstanceSetup
+from plenum.test.test_node import TestNode, checkProtocolInstanceSetup
 
 
 def check_rank_consistent_across_each_node(nodes):
@@ -23,7 +25,7 @@ def check_rank_consistent_across_each_node(nodes):
         if isinstance(node.poolManager, RegistryPoolManager):
             order.append(node.poolManager.node_names_ordered_by_rank)
         elif isinstance(node.poolManager, TxnPoolManager):
-            order.append(node.poolManager.node_ids_in_ordered_by_rank)
+            order.append(node.poolManager.node_ids_ordered_by_rank)
         else:
             RuntimeError('Dont know this pool manager {}'.
                          format(node.poolManager))
@@ -41,3 +43,15 @@ def check_newly_added_nodes(looper, all_nodes, new_nodes):
         assert all(new_node.rank > n.rank for n in old_nodes)
         old_nodes.append(new_node)
     checkProtocolInstanceSetup(looper, all_nodes, retryWait=1)
+
+
+def getPrimaryNodesIdxs(nodes: Sequence[TestNode]) -> List[int]:
+    primariesIdxs = []
+    for instId in range(len(nodes[0].replicas)):
+        for idx, node in enumerate(nodes):
+            if node.replicas[instId].isPrimary:
+                assert instId == len(primariesIdxs)
+                primariesIdxs.append(idx)
+
+    assert len(set(primariesIdxs)) == len(nodes[0].replicas)
+    return primariesIdxs
diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
new file mode 100644
index 0000000000..41b3544277
--- /dev/null
+++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
@@ -0,0 +1,59 @@
+from stp_core.common.log import getlogger
+
+from plenum.common.constants import ALIAS, SERVICES
+from plenum.test.pool_transactions.conftest import looper
+from plenum.test.pool_transactions.helper import updateNodeData
+
+from plenum.test.test_node import TestNode, checkNodesConnected, \
+    ensureElectionsDone
+from plenum.test.helper import checkViewNoForNodes
+
+from plenum.test.primary_selection.helper import getPrimaryNodesIdxs
+
+logger = getlogger()
+
+def test_primary_selection_after_primary_demotion_and_pool_restart(looper,
+        txnPoolNodeSet, stewardAndWalletForMasterNode, txnPoolMasterNodes,
+        tconf, tdirWithPoolTxns):
+    """
+    Demote primary and restart the pool.
+    Pool should select new primary and have viewNo=0 after restart.
+    """
+
+    logger.info("1. demote the node which has the primary replica of the master instance")
+    master_node = txnPoolMasterNodes[0]
+    client, wallet = stewardAndWalletForMasterNode
+
+    node_data = {
+        ALIAS: master_node.name,
+        SERVICES: []
+    }
+    updateNodeData(looper, client, wallet, master_node, node_data)
+
+    restNodes = [node for node in txnPoolNodeSet if node.name != master_node.name]
+    ensureElectionsDone(looper, restNodes)
+
+    logger.info("2. restart pool")
+    # Stopping existing nodes
+    for node in txnPoolNodeSet:
+        node.stop()
+        looper.removeProdable(node)
+
+    # Starting nodes again by creating `Node` objects since that simulates
+    # what happens when starting the node with script
+    restartedNodes = []
+    for node in txnPoolNodeSet:
+        restartedNode = TestNode(node.name, basedirpath=tdirWithPoolTxns,
+                                 config=tconf, ha=node.nodestack.ha,
+                                 cliha=node.clientstack.ha)
+        looper.add(restartedNode)
+        restartedNodes.append(restartedNode)
+
+    restNodes = [node for node in restartedNodes if node.name != master_node.name]
+
+    looper.run(checkNodesConnected(restNodes))
+    ensureElectionsDone(looper, restNodes)
+    checkViewNoForNodes(restNodes, 0)
+
+    primariesIdxs = getPrimaryNodesIdxs(restNodes)
+    assert restNodes[primariesIdxs[0]].name != master_node.name
diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py
new file mode 100644
index 0000000000..aaec4929da
--- /dev/null
+++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py
@@ -0,0 +1,50 @@
+from stp_core.common.log import getlogger
+
+from plenum.common.constants import ALIAS, SERVICES
+
+from plenum.test.pool_transactions.conftest import looper
+from plenum.test.pool_transactions.helper import updateNodeData
+
+from plenum.test.helper import checkViewNoForNodes
+from plenum.test.test_node import ensureElectionsDone
+from plenum.test.view_change.helper import ensure_view_change_complete
+
+logger = getlogger()
+
+def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPoolNodeSet,
+        stewardAndWalletForMasterNode, txnPoolMasterNodes):
+    """
+    Demote primary and do multiple view changes forcing primary rotation.
+    The demoted primary should be skipped without additional view changes.
+    """
+
+    viewNo0 = checkViewNoForNodes(txnPoolNodeSet)
+
+    logger.info("1. demote the node which has the primary replica of the "
+                "master instance; this should trigger a view change")
+    master_node = txnPoolMasterNodes[0]
+    client, wallet = stewardAndWalletForMasterNode
+    node_data = {
+        ALIAS: master_node.name,
+        SERVICES: []
+    }
+    updateNodeData(looper, client, wallet, master_node, node_data)
+
+    restNodes = [node for node in txnPoolNodeSet
+                 if node.name != master_node.name]
+    ensureElectionsDone(looper, restNodes)
+
+    viewNo1 = checkViewNoForNodes(restNodes)
+
+    assert viewNo1 == viewNo0 + 1
+    assert master_node.viewNo == viewNo0
+
+    logger.info("2. force view change 2 and check final viewNo")
+    ensure_view_change_complete(looper, restNodes)
+    viewNo2 = checkViewNoForNodes(restNodes)
+    assert viewNo2 == viewNo1 + 1
+
force view change 3 and check final viewNo") + ensure_view_change_complete(looper, restNodes) + viewNo3 = checkViewNoForNodes(restNodes) + assert viewNo3 == viewNo2 + 1 diff --git a/plenum/test/view_change/test_new_node_joins_after_view_change.py b/plenum/test/view_change/test_new_node_joins_after_view_change.py index 9496254138..b2510c8ef3 100644 --- a/plenum/test/view_change/test_new_node_joins_after_view_change.py +++ b/plenum/test/view_change/test_new_node_joins_after_view_change.py @@ -17,6 +17,8 @@ from plenum.test.primary_selection.conftest import nodeThetaAdded, \ one_node_added +from stp_core.common.log import getlogger +logger = getlogger() @pytest.fixture(scope='module') def all_nodes_view_change( @@ -88,4 +90,5 @@ def test_old_non_primary_restart_after_view_change(new_node_in_correct_view, compare_val_to=True)) > 0 ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) + ensureElectionsDone(looper, txnPoolNodeSet) assert not restarted_node._next_view_indications From 45da757e6b8359af06f3c3d15d5cc0c23331198e Mon Sep 17 00:00:00 2001 From: Andrey Kononykhin Date: Wed, 23 Aug 2017 17:06:27 +0300 Subject: [PATCH 2/5] fixed pep8 --- plenum/server/pool_manager.py | 18 +++++------------- plenum/server/primary_selector.py | 4 +--- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/plenum/server/pool_manager.py b/plenum/server/pool_manager.py index 0489e09969..ac1648cd7a 100644 --- a/plenum/server/pool_manager.py +++ b/plenum/server/pool_manager.py @@ -91,7 +91,6 @@ def get_name_by_rank(self, rank) -> Optional[str]: """ - class HasPoolManager: # noinspection PyUnresolvedReferences, PyTypeChecker def __init__(self, nodeRegistry=None, ha=None, cliname=None, cliha=None): @@ -123,8 +122,6 @@ def __init__(self, node, ha=None, cliname=None, cliha=None): self.nstack, self.cstack, self.nodeReg, self.cliNodeReg = \ self.getStackParamsAndNodeReg(self.name, self.basedirpath, ha=ha, cliname=cliname, cliha=cliha) - # order nodes initially by ledger - _ = self.node_ids_ordered_by_rank self._dataFieldsValidators = ( (NODE_IP, self._isIpAddressValid), @@ -383,14 +380,11 @@ def id(self): self._id = txn[TARGET_NYM] return self._id - def _order_node(self, nodeNym, nodeName): assert self._ordered_node_ids.get(nodeNym) in (nodeName, None), ( - "{} trying to order already ordered node {} ({}) " - "with other alias {}".format( - self.name, self._ordered_node_ids.get(nodeNym), nodeNym - ) - ) + "{} trying to order already ordered node {} ({}) " + "with other alias {}".format( + self.name, self._ordered_node_ids.get(nodeNym), nodeNym)) self._ordered_node_ids[nodeNym] = nodeName @@ -403,7 +397,7 @@ def node_ids_ordered_by_rank(self) -> List: self._order_node(txn[TARGET_NYM], txn[DATA][ALIAS]) return [nym for nym, name in self._ordered_node_ids.items() - if name in self.nodeReg] + if name in self.nodeReg] def get_rank_of(self, node_id) -> Optional[int]: if self.id is None: @@ -431,8 +425,6 @@ def __init__(self, name, basedirpath, nodeRegistry, ha, cliname, cliha): nodeRegistry=nodeRegistry, ha=ha, cliname=cliname, cliha=cliha) - _ = self.node_names_ordered_by_rank - def getStackParamsAndNodeReg(self, name, basedirpath, nodeRegistry=None, ha=None, cliname=None, cliha=None): nstack, nodeReg, cliNodeReg = self.getNodeStackParams(name, @@ -524,7 +516,7 @@ def node_names_ordered_by_rank(self) -> List: return sorted(self.nodeReg.keys()) def get_rank_of(self, node_id) -> Optional[int]: - #TODO node_id here has got another meaning + # TODO node_id here has got another meaning return 
         return self._get_rank(node_id, self.node_names_ordered_by_rank)
 
     def get_name_by_rank(self, rank) -> Optional[str]:
diff --git a/plenum/server/primary_selector.py b/plenum/server/primary_selector.py
index f820f388a3..cd276b0c1f 100644
--- a/plenum/server/primary_selector.py
+++ b/plenum/server/primary_selector.py
@@ -323,9 +323,7 @@ def next_primary_node_name(self, instance_id):
         assert name, ("{} failed to get node name for rank {}: "
                       "view_no {}, instance_id {}, totalNodes {}".format(
                           self, rank, self.viewNo, instance_id,
-                          self.node.totalNodes)
-                      )
-
+                          self.node.totalNodes))
         return name
 
     def next_primary_replica_name(self, instance_id):

From 15c19cbea7e3dc585ce842f9e5f614541cd590b4 Mon Sep 17 00:00:00 2001
From: Andrey Kononykhin
Date: Wed, 23 Aug 2017 17:43:36 +0300
Subject: [PATCH 3/5] fixed nodes order initialization

---
 plenum/server/pool_manager.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/plenum/server/pool_manager.py b/plenum/server/pool_manager.py
index ac1648cd7a..e3db2ecea8 100644
--- a/plenum/server/pool_manager.py
+++ b/plenum/server/pool_manager.py
@@ -112,13 +112,13 @@ def __init__(self, node, ha=None, cliname=None, cliha=None):
         self.basedirpath = node.basedirpath
         self._ledger = None
         self._id = None
-        self._ordered_node_ids = None
 
         TxnStackManager.__init__(
             self, self.name, self.basedirpath, isNode=True)
         self.state = self.loadState()
         self.reqHandler = self.getPoolReqHandler()
         self.initPoolState()
+        self._load_nodes_order_from_ledger()
         self.nstack, self.cstack, self.nodeReg, self.cliNodeReg = \
             self.getStackParamsAndNodeReg(self.name, self.basedirpath, ha=ha,
                                           cliname=cliname, cliha=cliha)
@@ -380,6 +380,12 @@ def id(self):
                     self._id = txn[TARGET_NYM]
         return self._id
 
+    def _load_nodes_order_from_ledger(self):
+        self._ordered_node_ids = OrderedDict()
+        for _, txn in self.ledger.getAllTxn():
+            if txn[TXN_TYPE] == NODE:
+                self._order_node(txn[TARGET_NYM], txn[DATA][ALIAS])
+
     def _order_node(self, nodeNym, nodeName):
         assert self._ordered_node_ids.get(nodeNym) in (nodeName, None), (
             "{} trying to order already ordered node {} ({}) "
@@ -390,12 +396,6 @@ def _order_node(self, nodeNym, nodeName):
 
     @property
     def node_ids_ordered_by_rank(self) -> List:
-        if self._ordered_node_ids is None:
-            self._ordered_node_ids = OrderedDict()
-            for _, txn in self.ledger.getAllTxn():
-                if txn[TXN_TYPE] == NODE:
-                    self._order_node(txn[TARGET_NYM], txn[DATA][ALIAS])
-
         return [nym for nym, name in self._ordered_node_ids.items()
                 if name in self.nodeReg]

From 357a48d8dd29669cec7f832640796b3ac9bd199f Mon Sep 17 00:00:00 2001
From: Andrey Kononykhin
Date: Mon, 28 Aug 2017 19:01:02 +0300
Subject: [PATCH 4/5] added checks that pool is functional after
 reconfigurations

---
 ..._selection_after_primary_demotion_and_pool_restart.py | 7 ++++++-
 ..._selection_after_primary_demotion_and_view_changes.py | 9 ++++++++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
index 41b3544277..0bd873a4f2 100644
--- a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
+++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
@@ -6,7 +6,8 @@
 
 from plenum.test.test_node import TestNode, checkNodesConnected, \
     ensureElectionsDone
-from plenum.test.helper import checkViewNoForNodes
+from plenum.test.helper import checkViewNoForNodes, \
+    sendReqsToNodesAndVerifySuffReplies
 
 from plenum.test.primary_selection.helper import getPrimaryNodesIdxs
@@ -33,6 +34,9 @@ def test_primary_selection_after_primary_demotion_and_pool_restart(looper,
     restNodes = [node for node in txnPoolNodeSet if node.name != master_node.name]
     ensureElectionsDone(looper, restNodes)
 
+    # ensure pool is working properly
+    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)
+
     logger.info("2. restart pool")
     # Stopping existing nodes
     for node in txnPoolNodeSet:
@@ -54,6 +58,7 @@ def test_primary_selection_after_primary_demotion_and_pool_restart(looper,
     looper.run(checkNodesConnected(restNodes))
     ensureElectionsDone(looper, restNodes)
     checkViewNoForNodes(restNodes, 0)
+    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)
 
     primariesIdxs = getPrimaryNodesIdxs(restNodes)
     assert restNodes[primariesIdxs[0]].name != master_node.name
diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py
index aaec4929da..28fa6d3ac0 100644
--- a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py
+++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py
@@ -5,7 +5,8 @@
 from plenum.test.pool_transactions.conftest import looper
 from plenum.test.pool_transactions.helper import updateNodeData
 
-from plenum.test.helper import checkViewNoForNodes
+from plenum.test.helper import checkViewNoForNodes, \
+    sendReqsToNodesAndVerifySuffReplies
 from plenum.test.test_node import ensureElectionsDone
 from plenum.test.view_change.helper import ensure_view_change_complete
 
@@ -39,8 +40,12 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper,
 
     assert viewNo1 == viewNo0 + 1
     assert master_node.viewNo == viewNo0
 
+    # ensure pool is working properly
+    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)
+
force view change 2 and check final viewNo") ensure_view_change_complete(looper, restNodes) + viewNo2 = checkViewNoForNodes(restNodes) assert viewNo2 == viewNo1 + 1 @@ -48,3 +53,5 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo ensure_view_change_complete(looper, restNodes) viewNo3 = checkViewNoForNodes(restNodes) assert viewNo3 == viewNo2 + 1 + + sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3) From 740ec4a26688db993f2d61b8b15861f83c5b34e4 Mon Sep 17 00:00:00 2001 From: Andrey Kononykhin Date: Mon, 28 Aug 2017 19:09:21 +0300 Subject: [PATCH 5/5] added additional view change to ensure original master is skipped --- ...tion_after_primary_demotion_and_view_changes.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py index 28fa6d3ac0..228af4111f 100644 --- a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py +++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py @@ -39,6 +39,8 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo assert viewNo1 == viewNo0 + 1 assert master_node.viewNo == viewNo0 + assert len(restNodes[0].replicas) == 1 # only one instance left + assert restNodes[0].replicas[0].primaryName != master_node.name # ensure pool is working properly sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3) @@ -47,11 +49,23 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo ensure_view_change_complete(looper, restNodes) viewNo2 = checkViewNoForNodes(restNodes) + assert restNodes[0].replicas[0].primaryName != master_node.name assert viewNo2 == viewNo1 + 1 + sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3) + logger.info("3. force view change 3 and check final viewNo") ensure_view_change_complete(looper, restNodes) viewNo3 = checkViewNoForNodes(restNodes) + assert restNodes[0].replicas[0].primaryName != master_node.name assert viewNo3 == viewNo2 + 1 sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3) + + logger.info("4. force view change 4 and check final viewNo") + ensure_view_change_complete(looper, restNodes) + viewNo4 = checkViewNoForNodes(restNodes) + assert restNodes[0].replicas[0].primaryName != master_node.name + assert viewNo4 == viewNo3 + 1 + + sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)