diff --git a/neighsyncd/neighsync.cpp b/neighsyncd/neighsync.cpp
index cb04371d41..85f35cdc2d 100644
--- a/neighsyncd/neighsync.cpp
+++ b/neighsyncd/neighsync.cpp
@@ -23,7 +23,8 @@ NeighSync::NeighSync(RedisPipeline *pipelineAppDB, DBConnector *stateDb, DBConne
     m_stateNeighRestoreTable(stateDb, STATE_NEIGH_RESTORE_TABLE_NAME),
     m_cfgInterfaceTable(cfgDb, CFG_INTF_TABLE_NAME),
     m_cfgLagInterfaceTable(cfgDb, CFG_LAG_INTF_TABLE_NAME),
-    m_cfgVlanInterfaceTable(cfgDb, CFG_VLAN_INTF_TABLE_NAME)
+    m_cfgVlanInterfaceTable(cfgDb, CFG_VLAN_INTF_TABLE_NAME),
+    m_cfgPeerSwitchTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME)
 {
     m_AppRestartAssist = new AppRestartAssist(pipelineAppDB, "neighsyncd", "swss", DEFAULT_NEIGHSYNC_WARMSTART_TIMER);
     if (m_AppRestartAssist)
@@ -98,14 +99,29 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj)
         return;
     }
 
+    std::vector<std::string> peerSwitchKeys;
     bool delete_key = false;
-    if ((nlmsg_type == RTM_DELNEIGH) || (state == NUD_INCOMPLETE) ||
-        (state == NUD_FAILED))
+    bool use_zero_mac = false;
+    m_cfgPeerSwitchTable.getKeys(peerSwitchKeys);
+    bool is_dualtor = peerSwitchKeys.size() > 0;
+    if (is_dualtor && (state == NUD_INCOMPLETE || state == NUD_FAILED))
+    {
+        use_zero_mac = true;
+    }
+    else if ((nlmsg_type == RTM_DELNEIGH) ||
+             (state == NUD_INCOMPLETE) || (state == NUD_FAILED))
     {
         delete_key = true;
     }
 
-    nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE);
+    if (use_zero_mac)
+    {
+        strncpy(macStr, "00:00:00:00:00:00", MAX_ADDR_SIZE);
+    }
+    else
+    {
+        nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE);
+    }
 
     /* Ignore neighbor entries with Broadcast Mac - Trigger for directed broadcast */
     if (!delete_key && (MacAddress(macStr) == MacAddress("ff:ff:ff:ff:ff:ff")))
diff --git a/neighsyncd/neighsync.h b/neighsyncd/neighsync.h
index 49a17ee6b6..e934ffcfc5 100644
--- a/neighsyncd/neighsync.h
+++ b/neighsyncd/neighsync.h
@@ -39,7 +39,7 @@ class NeighSync : public NetMsg
     Table m_stateNeighRestoreTable;
     ProducerStateTable m_neighTable;
     AppRestartAssist *m_AppRestartAssist;
-    Table m_cfgVlanInterfaceTable, m_cfgLagInterfaceTable, m_cfgInterfaceTable;
+    Table m_cfgVlanInterfaceTable, m_cfgLagInterfaceTable, m_cfgInterfaceTable, m_cfgPeerSwitchTable;
 
     bool isLinkLocalEnabled(const std::string &port);
 };
diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp
index c1eba4c0e3..c72dfcc493 100644
--- a/orchagent/neighorch.cpp
+++ b/orchagent/neighorch.cpp
@@ -22,6 +22,8 @@ extern Directory<Orch*> gDirectory;
 extern string gMySwitchType;
 extern int32_t gVoqMySwitchId;
 
+#define MUX_TUNNEL "MuxTunnel0"
+
 const int neighorch_pri = 30;
 
 NeighOrch::NeighOrch(DBConnector *appDb, string tableName, IntfsOrch *intfsOrch, FdbOrch *fdbOrch, PortsOrch *portsOrch, DBConnector *chassisAppDb) :
@@ -728,6 +730,11 @@ void NeighOrch::doTask(Consumer &consumer)
             if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()
                 || m_syncdNeighbors[neighbor_entry].mac != mac_address)
             {
+                if (mac_address == MacAddress("00:00:00:00:00:00"))
+                {
+                    MuxOrch* mux_orch = gDirectory.get<MuxOrch*>();
+                    mux_orch->createNextHopTunnel(MUX_TUNNEL, neighbor_entry.ip_address);
+                }
                 if (addNeighbor(neighbor_entry, mac_address))
                 {
                     it = consumer.m_toSync.erase(it);
diff --git a/tests/test_mux.py b/tests/test_mux.py
index e9eb027a9d..b7997590a3 100644
--- a/tests/test_mux.py
+++ b/tests/test_mux.py
@@ -20,6 +20,7 @@ class TestMuxTunnelBase(object):
     ASIC_NEIGH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY"
     ASIC_NEXTHOP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP"
     ASIC_ROUTE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY"
+    ASIC_FDB_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY"
     CONFIG_MUX_CABLE = "MUX_CABLE"
 
     SERV1_IPV4 = "192.168.0.100"
@@ -664,6 +665,45 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name):
 
         assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id)
 
+    def remove_link_and_test_tunnel_create(self, appdb, asicdb, confdb, dvs):
+        self.create_vlan_interface(confdb, asicdb, dvs)
+        self.create_mux_cable(confdb)
+        self.create_and_test_tunnel(appdb, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP",
+                                    dst_ip="10.1.0.32", dscp_mode="uniform",
+                                    ecn_mode="standard", ttl_mode="pipe")
+
+        peer_attrs = {
+            "address_ipv4": "10.1.0.32"
+        }
+        confdb.create_entry("PEER_SWITCH", "peer", peer_attrs)
+
+        dvs.runcmd("ping -c 10 192.168.0.99")
+        route = None
+        routes = asicdb.get_keys(self.ASIC_ROUTE_TABLE)
+        for r in routes:
+            t = json.loads(r)
+            if t["dest"] == "192.168.0.99/32":
+                route = r
+        assert json.loads(route)["dest"] == "192.168.0.99/32"
+
+        fvs1 = asicdb.get_entry(self.ASIC_ROUTE_TABLE, route)
+        oid = fvs1["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"]
+        fvs2 = asicdb.get_entry(self.ASIC_NEXTHOP_TABLE, oid)
+        assert fvs2["SAI_NEXT_HOP_ATTR_IP"] == "10.1.0.32"
+
+        fdb = asicdb.get_keys(self.ASIC_FDB_TABLE)
+        mac = json.loads(fdb[0])["mac"]
+        dvs.runcmd("ip -4 neigh replace 192.168.0.99 lladdr " + mac + " dev Vlan1000")
+        time.sleep(10)
+        route = None
+        routes = asicdb.get_keys(self.ASIC_ROUTE_TABLE)
+        for r in routes:
+            t = json.loads(r)
+            if t["dest"] == "192.168.0.99/32":
+                route = r
+        assert route is None
+
+
     def cleanup_left_over(self, db, asicdb):
         """ Cleanup APP and ASIC tables """
 
@@ -750,6 +790,14 @@ def test_mux_metrics(self, dvs, testlog):
 
         self.create_and_test_metrics(appdb, statedb, dvs)
 
+    def test_neighbor_miss(self, dvs, testlog):
+        """ test IP tunnel to Active for missing neighbor """
+        appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
+        asicdb = dvs.get_asic_db()
+        confdb = dvs.get_config_db()
+
+        self.remove_link_and_test_tunnel_create(appdb, asicdb, confdb, dvs)
+
     # Add Dummy always-pass test at end as workaroud
     # for issue when Flaky fail on final test it invokes module tear-down before retrying
 