Update qos sai due to no lossless pg for some platforms #16440

Merged: 6 commits, Apr 1, 2025
5 changes: 5 additions & 0 deletions tests/qos/files/mellanox/qos_param_generator.py
@@ -236,6 +236,11 @@ def calculate_parameters(self):
         self.qos_params_mlnx['wm_pg_shared_lossy'].update(wm_shared_lossy)
         wm_shared_lossy["pkts_num_margin"] = 8
         self.qos_params_mlnx['wm_q_shared_lossy'].update(wm_shared_lossy)
+        if 'lossy_dscp' in self.egressLossyProfile:
+            lossy_queue['dscp'] = self.egressLossyProfile['lossy_dscp']
+            self.qos_params_mlnx['wm_pg_shared_lossy']['dscp'] = self.egressLossyProfile['lossy_dscp']
+            self.qos_params_mlnx['wm_q_shared_lossy']['dscp'] = self.egressLossyProfile['lossy_dscp']
+            self.qos_params_mlnx['wm_q_shared_lossy']['queue'] = self.egressLossyProfile['lossy_queue']

         wm_buf_pool_lossless = self.qos_params_mlnx['wm_buf_pool_lossless']
         wm_buf_pool_lossless['pkts_num_trig_pfc'] = pkts_num_trig_pfc
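For readers tracing the parameter flow, a minimal sketch of what the new branch produces, using hypothetical values (the real ones come from the egressLossyProfile fixture in tests/qos/qos_sai_base.py further down):

    # Hypothetical input from the egressLossyProfile fixture (illustration only)
    egressLossyProfile = {'lossy_dscp': '11', 'lossy_queue': '4'}
    qos_params_mlnx = {'wm_q_shared_lossy': {'pkts_num_margin': 8}}

    if 'lossy_dscp' in egressLossyProfile:
        qos_params_mlnx['wm_q_shared_lossy']['dscp'] = egressLossyProfile['lossy_dscp']
        qos_params_mlnx['wm_q_shared_lossy']['queue'] = egressLossyProfile['lossy_queue']

    # qos_params_mlnx is now:
    # {'wm_q_shared_lossy': {'pkts_num_margin': 8, 'dscp': '11', 'queue': '4'}}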
8 changes: 4 additions & 4 deletions tests/qos/files/mellanox/special_qos_config.yml
@@ -27,12 +27,12 @@ qos_params:
     xon_4:
       packet_size: 800
     lossy_queue_1:
-      packet_size: 800
+      packet_size: 1200
     wm_pg_shared_lossless:
       packet_size: 800
       pkts_num_margin: 7
     wm_pg_shared_lossy:
-      packet_size: 800
-      pkts_num_margin: 5
+      packet_size: 1200
+      pkts_num_margin: 8
     wm_q_shared_lossy:
-      packet_size: 800
+      packet_size: 1200
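Why packet_size and pkts_num_margin move together: watermark checks count buffer occupancy in cells, so a larger packet spans more cells and leaves more rounding slack per packet. A rough sketch with an assumed cell size (the real value is platform specific and only illustrative here):

    import math

    def cells_per_packet(packet_size, cell_size=144):
        # cell_size of 144 bytes is an assumed example, not a platform constant
        return math.ceil(packet_size / cell_size)

    cells_per_packet(800)    # 6 cells with the assumed cell size
    cells_per_packet(1200)   # 9 cells; more per-packet slack, hence margin 5 -> 8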
54 changes: 46 additions & 8 deletions tests/qos/qos_sai_base.py
@@ -346,8 +346,24 @@ def __getBufferProfile(self, request, dut_asic, os_version, table, port, priorit
             else:
                 bufferProfileName = out.translate({ord(i): None for i in '[]'})
         else:
-            bufferProfileName = bufkeystr + dut_asic.run_redis_cmd(
-                argv=["redis-cli", "-n", db, "HGET", keystr, "profile"])[0]
+            profile_content = dut_asic.run_redis_cmd(argv=["redis-cli", "-n", db, "HGET", keystr, "profile"])
+            if profile_content:
+                bufferProfileName = bufkeystr + profile_content[0]
+            else:
+                logger.info("No lossless buffer PG; returning a dummy buffer profile to keep existing cases compatible")
+                dummy_buffer_profile = {
+                    "profileName": f"{bufkeystr}pg_lossless_0_0m_profile",
+                    "pool": "ingress_lossless_pool",
+                    "xon": "0",
+                    "xoff": "0",
+                    "size": "0",
+                    "dynamic_th": "0",
+                    "pg_q_alpha": "0",
+                    "port_alpha": "0",
+                    "pool_size": "0",
+                    "static_th": "0"
+                }
+                return dummy_buffer_profile

         result = dut_asic.run_redis_cmd(
             argv=["redis-cli", "-n", db, "HGETALL", bufferProfileName]
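The shape of the fallback above, pulled out as a standalone sketch (run_redis_cmd stands in for the DUT helper; on ports with cable length 0m the BUFFER_PG entry is absent, so HGET yields nothing):

    def get_buffer_profile_name(run_redis_cmd, db, keystr, bufkeystr):
        # HGET returns an empty result when the BUFFER_PG entry does not exist,
        # which is the case on ports that have no lossless PG (0m cable length)
        profile = run_redis_cmd(argv=["redis-cli", "-n", db, "HGET", keystr, "profile"])
        if profile:
            return bufkeystr + profile[0]
        return None  # caller falls back to the all-zero dummy profile above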
@@ -1781,10 +1797,13 @@ def releaseAllPorts(
         Raises:
             RunAnsibleModuleFail if ptf test fails
         """
-        self.runPtfTest(
-            ptfhost, testCase="sai_qos_tests.ReleaseAllPorts",
-            testParams=dutTestParams["basicParams"]
-        )
+        if isMellanoxDevice(duthosts[0]):
+            logger.info("skip releaseAllPorts fixture for Mellanox device")
+        else:
+            self.runPtfTest(
+                ptfhost, testCase="sai_qos_tests.ReleaseAllPorts",
+                testParams=dutTestParams["basicParams"]
+            )

     def __loadSwssConfig(self, duthost):
         """
@@ -2107,12 +2126,25 @@ def egressLossyProfile(

         srcport = dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]]

+        is_lossy_queue_only = False
+
         if srcport in dualtor_ports_for_duts:
             queues = "0-1"
         else:
-            queues = "0-2"
+            if isMellanoxDevice(duthost):
+                cable_len = dut_asic.shell(f"redis-cli -n 4 hget 'CABLE_LENGTH|AZURE' {srcport}")['stdout']
+                if cable_len == '0m':
+                    is_lossy_queue_only = True
+                    logger.info(f"{srcport} has only lossy queue")
+            if is_lossy_queue_only:
+                is_lossy_queue_only = True
+                queue_table_postfix_list = ['1-3', '4', '5']
+                queue_to_dscp_map = {'1-3': '1', '4': '11', '5': '31'}
+                queues = random.choice(queue_table_postfix_list)
+            else:
+                queues = "0-2"

-        yield self.__getBufferProfile(
+        egress_lossy_profile = self.__getBufferProfile(
             request,
             dut_asic,
             duthost.os_version,
@@ -2121,6 +2153,12 @@
             srcport,
             queues
         )
+        if is_lossy_queue_only:
+            egress_lossy_profile['lossy_dscp'] = queue_to_dscp_map[queues]
+            egress_lossy_profile['lossy_queue'] = '1' if queues == '1-3' else queues
+        logger.info(f"queues:{queues}, egressLossyProfile: {egress_lossy_profile}")
+
+        yield egress_lossy_profile

     @pytest.fixture(scope='class')
     def losslessSchedProfile(
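The selection logic above in isolation: on a Mellanox port whose CABLE_LENGTH is 0m, one of three lossy queue ranges is picked at random, each tied to a DSCP value that maps onto it (a sketch using the literals from the diff):

    import random

    queue_to_dscp_map = {'1-3': '1', '4': '11', '5': '31'}
    queues = random.choice(list(queue_to_dscp_map))
    lossy_dscp = queue_to_dscp_map[queues]
    lossy_queue = '1' if queues == '1-3' else queues   # one concrete queue out of '1-3'

These two keys, lossy_dscp and lossy_queue, are exactly what qos_param_generator.py checks for when it patches the wm_*_lossy parameters.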
20 changes: 19 additions & 1 deletion tests/qos/test_qos_sai.py
@@ -175,6 +175,11 @@ def check_skip_xon_hysteresis_test(xonHysteresisKey, dutQosConfig,
                     " Pls see qos.yaml for the port idx's that are needed.")


+def skip_test_on_no_lossless_pg(portSpeedCableLength):
+    if portSpeedCableLength == "0_0m":
+        pytest.skip("skip the test since no lossless buffer PG is configured")
+
+
 class TestQosSai(QosSaiBase):
     """TestQosSai derives from QosSaiBase and contains collection of QoS SAI test cases.

@@ -411,6 +416,7 @@ def testQosSaiPfcXoffLimit(
                 "Additional DSCPs are not supported on non-dual ToR ports")

         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if dutTestParams['hwsku'] in self.BREAKOUT_SKUS and 'backend' not in dutTestParams['topo']:
             qosConfig = dutQosConfig["param"][portSpeedCableLength]["breakout"]
         else:
@@ -508,6 +514,7 @@ def testPfcStormWithSharedHeadroomOccupancy(
             pytest.skip("Shared Headroom has to be enabled for this test")

         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if xonProfile in list(dutQosConfig["param"][portSpeedCableLength].keys()):
             qosConfig = dutQosConfig["param"][portSpeedCableLength]
         else:
@@ -681,6 +688,7 @@ def testQosSaiPfcXonLimit(
                 "Additional DSCPs are not supported on non-dual ToR ports")

         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if xonProfile in list(dutQosConfig["param"][portSpeedCableLength].keys()):
             qosConfig = dutQosConfig["param"][portSpeedCableLength]
         else:
@@ -858,6 +866,7 @@ def testQosSaiHeadroomPoolSize(
         """

         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         qosConfig = dutQosConfig["param"][portSpeedCableLength]
         testPortIps = dutConfig["testPortIps"]

@@ -1616,6 +1625,7 @@ def testQosSaiDwrr(
             RunAnsibleModuleFail if ptf test fails
         """
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         qosConfig = dutQosConfig["param"]
         if "wrr" in qosConfig[portSpeedCableLength]:
             qosConfigWrr = qosConfig[portSpeedCableLength]["wrr"]
@@ -1695,6 +1705,9 @@ def testQosSaiPgSharedWatermark(
         """

         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        if pgProfile == "wm_pg_shared_lossless":
+            skip_test_on_no_lossless_pg(portSpeedCableLength)
+
         if pgProfile in list(dutQosConfig["param"][portSpeedCableLength].keys()):
             qosConfig = dutQosConfig["param"][portSpeedCableLength]
         else:
@@ -1798,6 +1811,7 @@ def testQosSaiPgHeadroomWatermark(
             RunAnsibleModuleFail if ptf test fails
         """
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if dutTestParams['hwsku'] in self.BREAKOUT_SKUS and 'backend' not in dutTestParams['topo']:
             qosConfig = dutQosConfig["param"][portSpeedCableLength]["breakout"]
         else:
@@ -1910,6 +1924,8 @@ def testQosSaiQSharedWatermark(
             RunAnsibleModuleFail if ptf test fails
         """
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        if queueProfile == "wm_q_shared_lossless":
+            skip_test_on_no_lossless_pg(portSpeedCableLength)

         if queueProfile == "wm_q_shared_lossless":
             if dutTestParams["basicParams"]["sonic_asic_type"] == 'cisco-8000':
@@ -1972,7 +1988,7 @@

     def testQosSaiDscpToPgMapping(
             self, get_src_dst_asic_and_duts, duthost, request, ptfhost, dutTestParams, dutConfig, dut_qos_maps,  # noqa F811
-            change_lag_lacp_timer):
+            change_lag_lacp_timer, dutQosConfig):
         """
         Test QoS SAI DSCP to PG mapping ptf test

@@ -1990,6 +2006,8 @@
             RunAnsibleModuleFail if ptf test fails
         """
         disableTest = request.config.getoption("--disable_test")
+        portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if dutTestParams["basicParams"]["sonic_asic_type"] == 'cisco-8000' or \
                 ('platform_asic' in dutTestParams["basicParams"] and
                  dutTestParams["basicParams"]["platform_asic"] in ["broadcom-dnx", "mellanox"]):
20 changes: 17 additions & 3 deletions tests/saitests/py3/sai_base_test.py
@@ -44,6 +44,7 @@
     "5": "scheduler.0",
     "6": "scheduler.0",
     "7": ""}
+BLOCK_DATA_PLANE_SCHEDULER_NAME = 'scheduler.block_data_plane'


 class ThriftInterface(BaseTest):
@@ -238,16 +239,15 @@ def disable_mellanox_egress_data_plane(self, ptf_port_list):
             dut_port = self.get_dut_port(ptf_port)
             dut_port_list.append(dut_port)
         self.original_dut_port_queue_scheduler_map = self.get_queue_scheduler_name(dut_port_list)
-        block_data_plane_scheduler_name = 'scheduler.block_data_plane'
         cmd_set_block_data_plane_scheduler = \
-            f'sonic-db-cli CONFIG_DB hset "SCHEDULER|{block_data_plane_scheduler_name}" "type" DWRR "weight" 15 "pir" 1'
+            f'sonic-db-cli CONFIG_DB hset "SCHEDULER|{BLOCK_DATA_PLANE_SCHEDULER_NAME}" "type" DWRR "weight" 15 "pir" 1'

         self.exec_cmd_on_dut(self.server, self.test_params['dut_username'], self.test_params['dut_password'],
                              cmd_set_block_data_plane_scheduler)
         for dut_port in dut_port_list:
             for q in DATA_PLANE_QUEUE_LIST:
                 cmd_block_q = \
-                    f" sonic-db-cli CONFIG_DB hset 'QUEUE|{dut_port}|{q}' scheduler {block_data_plane_scheduler_name}"
+                    f" sonic-db-cli CONFIG_DB hset 'QUEUE|{dut_port}|{q}' scheduler {BLOCK_DATA_PLANE_SCHEDULER_NAME}"
                 self.exec_cmd_on_dut(
                     self.server, self.test_params['dut_username'], self.test_params['dut_password'], cmd_block_q)

@@ -276,6 +276,20 @@ def enable_mellanox_egress_data_plane(self, ptf_port_list):
             self.exec_cmd_on_dut(
                 self.server, self.test_params['dut_username'],
                 self.test_params['dut_password'], cmd_recover_q_scheduler_config)
+        self.remove_block_data_plan_scheduler()
+
+    def remove_block_data_plan_scheduler(self):
+        get_block_data_plane_scheduler_name = \
+            f"sonic-db-cli CONFIG_DB keys 'SCHEDULER|{BLOCK_DATA_PLANE_SCHEDULER_NAME}'"
+        scheduler_name, _, _ = self.exec_cmd_on_dut(self.server,
+                                                    self.test_params['dut_username'],
+                                                    self.test_params['dut_password'],
+                                                    get_block_data_plane_scheduler_name)
+        if isinstance(scheduler_name, list) and scheduler_name and BLOCK_DATA_PLANE_SCHEDULER_NAME in scheduler_name[0]:
+            cmd_del_block_data_plane_scheduler = \
+                f'sonic-db-cli CONFIG_DB del "SCHEDULER|{BLOCK_DATA_PLANE_SCHEDULER_NAME}"'
+            self.exec_cmd_on_dut(self.server, self.test_params['dut_username'], self.test_params['dut_password'],
+                                 cmd_del_block_data_plane_scheduler)


class ThriftInterfaceDataPlane(ThriftInterface):
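Taken together, the block/restore cycle in this file works as follows. A condensed sketch (DATA_PLANE_QUEUE_LIST is assumed to hold the queue indices used above, and exec_cmd is a stand-in for the exec_cmd_on_dut wrapper):

    BLOCK_DATA_PLANE_SCHEDULER_NAME = 'scheduler.block_data_plane'
    DATA_PLANE_QUEUE_LIST = ['0', '1', '2', '3', '4', '5', '6']  # assumed contents

    def block_egress(exec_cmd, dut_ports):
        # A DWRR scheduler with pir=1 lets queues drain at a negligible rate,
        # effectively freezing egress so watermarks can be read back reliably.
        exec_cmd(f'sonic-db-cli CONFIG_DB hset "SCHEDULER|{BLOCK_DATA_PLANE_SCHEDULER_NAME}"'
                 f' "type" DWRR "weight" 15 "pir" 1')
        for port in dut_ports:
            for q in DATA_PLANE_QUEUE_LIST:
                exec_cmd(f"sonic-db-cli CONFIG_DB hset 'QUEUE|{port}|{q}'"
                         f" scheduler {BLOCK_DATA_PLANE_SCHEDULER_NAME}")

Restoring reverses the per-queue assignment from the saved scheduler map and then deletes the temporary scheduler; the keys lookup guard makes a second cleanup a no-op.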
2 changes: 1 addition & 1 deletion tests/saitests/py3/sai_qos_tests.py
@@ -5193,7 +5193,7 @@ def runTest(self):
                 assert (q_wm_res[queue] <= (margin + 1) * cell_size)
             elif pkts_num_fill_min:
                 assert (q_wm_res[queue] == 0)
-            elif 'cisco-8000' in asic_type or "SN5600" in hwsku or "SN5400" in hwsku:
+            elif 'cisco-8000' in asic_type or "SN56" in hwsku or "SN5400" in hwsku:
                 assert (q_wm_res[queue] <= (margin + 1) * cell_size)
             else:
                 if platform_asic and platform_asic == "broadcom-dnx":
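This one-line change broadens the SKU check from the exact SN5600 to the whole SN56xx family; a hypothetical SKU string shows why the shorter substring matters:

    hwsku = "Mellanox-SN5610N-C256S1"   # hypothetical SKU name for illustration
    assert "SN56" in hwsku              # matches SN5600, SN5610, ... variants
    assert "SN5600" not in hwsku        # the old check would have missed it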