
Commit b5209c4

prsunnyda authored and committed
Handle IPv6 and ECMP routes to be programmed to ASIC (#1711)
* In the case of a standby mux, the associated routes may arrive at orchagent with an ifname of tun0. Handle that case when the nexthop IP is non-zero.
* For ECMP, multiple nexthop IPs can share the same SAI next hop ID (the tunnel next hop). The existing data structure cannot represent this, so a secondary map is added for shared nexthops.
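The second bullet is the crux of the change: with both mux neighbors in standby, two different nexthop IPs resolve to the same SAI tunnel next hop, and a single map keyed by that ID can only remember one of them. A minimal sketch of the collision and the secondary-map fix (plain std:: types stand in for sai_object_id_t and NextHopKey; the addresses are illustrative):

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>

    int main()
    {
        using SaiId = unsigned long;          // stand-in for sai_object_id_t
        const SaiId tunnel_nh = 0x100;        // both standby neighbors resolve here

        std::map<SaiId, std::string> members;           // SAI ID -> nexthop key
        std::map<SaiId, std::set<std::string>> shared;  // overflow for duplicates

        for (const std::string ip : {"fc02:1000::100", "fc02:1000::200"})
        {
            if (members.find(tunnel_nh) == members.end())
                members[tunnel_nh] = ip;       // first IP takes the primary slot
            else
                shared[tunnel_nh].insert(ip);  // a plain map would have lost this one
        }

        assert(members.size() == 1 && shared[tunnel_nh].size() == 1);
        return 0;
    }
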
1 parent 515cc1a commit b5209c4

File tree: 5 files changed, +110 −26 lines changed

cfgmgr/tunnelmgr.cpp (+30 −8)

@@ -64,21 +64,43 @@ static int cmdIpTunnelRouteAdd(const std::string& pfx, std::string & res)
     // ip route add/replace {{ip prefix}} dev {{tunnel intf}}
     // Replace route if route already exists
     ostringstream cmd;
-    cmd << IP_CMD " route replace "
-        << shellquote(pfx)
-        << " dev "
-        << TUNIF;
+    if (IpPrefix(pfx).isV4())
+    {
+        cmd << IP_CMD " route replace "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+    else
+    {
+        cmd << IP_CMD " -6 route replace "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+
     return swss::exec(cmd.str(), res);
 }
 
 static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res)
 {
     // ip route del {{ip prefix}} dev {{tunnel intf}}
     ostringstream cmd;
-    cmd << IP_CMD " route del "
-        << shellquote(pfx)
-        << " dev "
-        << TUNIF;
+    if (IpPrefix(pfx).isV4())
+    {
+        cmd << IP_CMD " route del "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+    else
+    {
+        cmd << IP_CMD " -6 route del "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+
     return swss::exec(cmd.str(), res);
 }
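
Both helpers now differ only in the address-family flag handed to ip(8): "2020::/64", for example, yields "ip -6 route replace 2020::/64 dev tun0". A condensed sketch of the command being assembled (a hypothetical free function; literal "ip" and "tun0" stand in for the IP_CMD and TUNIF macros, and shell quoting is omitted):

    #include <sstream>
    #include <string>

    // Build the kernel tunnel-route command for either address family.
    std::string tunnelRouteCmd(const std::string& pfx, bool isV4, const std::string& op)
    {
        std::ostringstream cmd;
        cmd << "ip" << (isV4 ? "" : " -6")  // IPv6 routes need the -6 family flag
            << " route " << op << " "       // op is "replace" or "del"
            << pfx << " dev tun0";
        return cmd.str();
    }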

orchagent/muxorch.cpp (−12)

@@ -1278,12 +1278,6 @@ void MuxCableOrch::updateMuxState(string portName, string muxState)
 
 void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey)
 {
-    if (!nhKey.ip_address.isV4())
-    {
-        SWSS_LOG_INFO("IPv6 tunnel route add '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
-        return;
-    }
-
     vector<FieldValueTuple> data;
     string key, alias = nhKey.alias;
 
@@ -1299,12 +1293,6 @@ void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey)
 
 void MuxCableOrch::removeTunnelRoute(const NextHopKey &nhKey)
 {
-    if (!nhKey.ip_address.isV4())
-    {
-        SWSS_LOG_INFO("IPv6 tunnel route remove '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
-        return;
-    }
-
     string key, alias = nhKey.alias;
 
     IpPrefix pfx = nhKey.ip_address.to_string();
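These early returns were the remaining "Not Implemented" stubs for IPv6: with tunnelmgr (above) now programming IPv6 kernel routes, the mux tunnel-route handlers can treat both address families uniformly.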

orchagent/neighorch.cpp (+4 −1)

@@ -284,7 +284,10 @@ bool NeighOrch::isNextHopFlagSet(const NextHopKey &nexthop, const uint32_t nh_flag)
 
     auto nhop = m_syncdNextHops.find(nexthop);
 
-    assert(nhop != m_syncdNextHops.end());
+    if (nhop == m_syncdNextHops.end())
+    {
+        return false;
+    }
 
     if (nhop->second.nh_flags & nh_flag)
     {
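Beyond mux support, dropping the assert hardens this path: with asserts compiled out (NDEBUG), a lookup miss would previously have fallen through to dereferencing an end() iterator. A miss can presumably now occur legitimately, for example for a next hop owned by a standby mux that was never synced into m_syncdNextHops, and it simply reads as "flag not set".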

orchagent/routeorch.cpp (+37 −5)

@@ -566,7 +566,7 @@ void RouteOrch::doTask(Consumer& consumer)
                  * way is to create loopback interface and then create
                  * route pointing to it, so that we can traps packets to
                  * CPU */
-                if (alias == "eth0" || alias == "docker0" || alias == "tun0" ||
+                if (alias == "eth0" || alias == "docker0" ||
                     alias == "lo" || !alias.compare(0, strlen(LOOPBACK_PREFIX), LOOPBACK_PREFIX))
                 {
                     excp_intfs_flag = true;
@@ -591,10 +591,18 @@ void RouteOrch::doTask(Consumer& consumer)
 
                 if (overlay_nh == false)
                 {
+                    if (alsv[0] == "tun0" && !(IpAddress(ipv[0]).isZero()))
+                    {
+                        alsv[0] = gIntfsOrch->getRouterIntfsAlias(ipv[0]);
+                    }
                     nhg_str = ipv[0] + NH_DELIMITER + alsv[0];
 
                     for (uint32_t i = 1; i < ipv.size(); i++)
                     {
+                        if (alsv[i] == "tun0" && !(IpAddress(ipv[i]).isZero()))
+                        {
+                            alsv[i] = gIntfsOrch->getRouterIntfsAlias(ipv[i]);
+                        }
                         nhg_str += NHG_DELIMITER + ipv[i] + NH_DELIMITER + alsv[i];
                     }
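
When a mux port is standby, the kernel points its routes at tun0, but orchagent still needs the true router interface to key the nexthop; the hunk above recovers it from the nexthop IP via gIntfsOrch->getRouterIntfsAlias(). A sketch of that rewrite (resolveAlias() is a hypothetical stand-in for the lookup, and the string comparison approximates IpAddress::isZero()):

    #include <string>
    #include <vector>

    // Hypothetical stand-in for gIntfsOrch->getRouterIntfsAlias(): map a
    // nexthop IP back to the router interface it belongs to.
    static std::string resolveAlias(const std::string& /*ip*/)
    {
        return "Vlan1000";  // e.g. fc02:1000::100 sits on Vlan1000
    }

    // Rewrite tun0 aliases in place, one nexthop at a time, as the hunk above does.
    static void rewriteTunnelAliases(const std::vector<std::string>& ipv,
                                     std::vector<std::string>& alsv)
    {
        for (size_t i = 0; i < ipv.size(); i++)
        {
            // A zero nexthop IP marks a directly attached route; those keep tun0
            // and are dropped later by the "skip direct routes to tun0" branch.
            if (alsv[i] == "tun0" && ipv[i] != "0.0.0.0" && ipv[i] != "::")
            {
                alsv[i] = resolveAlias(ipv[i]);
            }
        }
    }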

@@ -620,6 +628,11 @@ void RouteOrch::doTask(Consumer& consumer)
                     /* add addBlackholeRoute or addRoute support empty nhg */
                     it = consumer.m_toSync.erase(it);
                 }
+                /* skip direct routes to tun0 */
+                else if (alsv[0] == "tun0")
+                {
+                    it = consumer.m_toSync.erase(it);
+                }
                 /* directly connected route to VRF interface which come from kernel */
                 else if (!alsv[0].compare(0, strlen(VRF_PREFIX), VRF_PREFIX))
                 {
@@ -993,6 +1006,7 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
     vector<sai_object_id_t> next_hop_ids;
     set<NextHopKey> next_hop_set = nexthops.getNextHops();
     std::map<sai_object_id_t, NextHopKey> nhopgroup_members_set;
+    std::map<sai_object_id_t, set<NextHopKey>> nhopgroup_shared_set;
 
     /* Assert each IP address exists in m_syncdNextHops table,
      * and add the corresponding next_hop_id to next_hop_ids. */
@@ -1013,7 +1027,14 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
 
         sai_object_id_t next_hop_id = m_neighOrch->getNextHopId(it);
         next_hop_ids.push_back(next_hop_id);
-        nhopgroup_members_set[next_hop_id] = it;
+        if (nhopgroup_members_set.find(next_hop_id) == nhopgroup_members_set.end())
+        {
+            nhopgroup_members_set[next_hop_id] = it;
+        }
+        else
+        {
+            nhopgroup_shared_set[next_hop_id].insert(it);
+        }
     }
 
     sai_attribute_t nhg_attr;
@@ -1083,8 +1104,20 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
         gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER);
 
         // Save the membership into next hop structure
-        next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] =
-            nhgm_id;
+        if (nhopgroup_shared_set.find(nhid) != nhopgroup_shared_set.end())
+        {
+            auto it = nhopgroup_shared_set[nhid].begin();
+            next_hop_group_entry.nhopgroup_members[*it] = nhgm_id;
+            nhopgroup_shared_set[nhid].erase(it);
+            if (nhopgroup_shared_set[nhid].empty())
+            {
+                nhopgroup_shared_set.erase(nhid);
+            }
+        }
+        else
+        {
+            next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] = nhgm_id;
+        }
     }
 
     /* Increment the ref_count for the next hops used by the next hop group. */
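
The member-assignment hunk then hands each created group member ID to a distinct nexthop key, draining the shared set before falling back to the primary map so every key ends up owning exactly one member. A runnable sketch of that bookkeeping (plain types stand in for NextHopKey and sai_object_id_t; the IDs are illustrative):

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    int main()
    {
        using SaiId = unsigned long;

        // State after the classification loop: two keys share SAI ID 0x100.
        std::map<SaiId, std::string> members = {{0x100, "fc02:1000::100"}};
        std::map<SaiId, std::set<std::string>> shared = {{0x100, {"fc02:1000::200"}}};

        // Two group members were created against that same SAI next hop ID.
        std::vector<SaiId> member_ids = {0x200, 0x201};
        std::map<std::string, SaiId> group_members;  // nexthop key -> member ID

        for (SaiId nhgm_id : member_ids)
        {
            const SaiId nhid = 0x100;  // the SAI ID this member points at
            auto sh = shared.find(nhid);
            if (sh != shared.end())
            {
                // Drain the shared set first, one key per member ID...
                auto it = sh->second.begin();
                group_members[*it] = nhgm_id;
                sh->second.erase(it);
                if (sh->second.empty())
                    shared.erase(sh);
            }
            else
            {
                // ...then the primary entry receives the remaining member ID.
                group_members[members[nhid]] = nhgm_id;
            }
        }

        assert(group_members.size() == 2);  // each key owns one member ID
        return 0;
    }
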
@@ -1098,7 +1131,6 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
     next_hop_group_entry.ref_count = 0;
     m_syncdNextHopGroups[nexthops] = next_hop_group_entry;
 
-
     return true;
 }
tests/test_mux.py (+39)

@@ -59,6 +59,7 @@ def create_vlan_interface(self, confdb, asicdb, dvs):
         fvs = {"NULL": "NULL"}
         confdb.create_entry("VLAN_INTERFACE", "Vlan1000", fvs)
         confdb.create_entry("VLAN_INTERFACE", "Vlan1000|192.168.0.1/24", fvs)
+        confdb.create_entry("VLAN_INTERFACE", "Vlan1000|fc02:1000::1/64", fvs)
 
         dvs.runcmd("config interface startup Ethernet0")
         dvs.runcmd("config interface startup Ethernet4")
@@ -334,6 +335,44 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route):
         self.set_mux_state(appdb, "Ethernet4", "active")
         self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0])
 
+        ps._del(rtprefix)
+
+        # Test IPv6 ECMP routes, starting with both ports in standby
+        self.set_mux_state(appdb, "Ethernet0", "standby")
+        self.set_mux_state(appdb, "Ethernet4", "standby")
+
+        rtprefix = "2020::/64"
+
+        dvs_route.check_asicdb_deleted_route_entries([rtprefix])
+
+        ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE")
+
+        fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), ("ifname", "tun0,tun0")])
+
+        ps.set(rtprefix, fvs)
+
+        # Check if route was propagated to ASIC DB
+        rtkeys = dvs_route.check_asicdb_route_entries([rtprefix])
+
+        # Check for nexthop group and validate nexthop group members in ASIC DB
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2)
+
+        # Step 1 - Change one NH to active and verify ECMP route
+        self.set_mux_state(appdb, "Ethernet0", "active")
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1)
+
+        # Step 2 - Change the other NH to active and verify ECMP route
+        self.set_mux_state(appdb, "Ethernet4", "active")
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0])
+
+        # Step 3 - Change one NH back to standby and verify ECMP route
+        self.set_mux_state(appdb, "Ethernet0", "standby")
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1)
+
+        # Step 4 - Change the other NH to standby and verify ECMP route
+        self.set_mux_state(appdb, "Ethernet4", "standby")
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2)
+
 
     def get_expected_sai_qualifiers(self, portlist, dvs_acl):
         expected_sai_qualifiers = {
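Judging by its usage here, the optional count passed to check_nexthop_group_in_asic_db is the number of group members expected to resolve to the tunnel next hop: 2 with both ports standby, 1 with exactly one port active, and omitted when both are active and no member should point at the tunnel.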
