@@ -675,7 +675,6 @@ namespace portsorch_test
         _unhook_sai_queue_api();
     }

-    /*
     TEST_F(PortsOrchTest, PfcZeroBufferHandler)
     {
         Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
@@ -684,109 +683,111 @@ namespace portsorch_test
         Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME);
         Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME);

-        // Get SAI default ports to populate DB
-        auto ports = ut_helper::getInitialSaiPorts();
-
-        // Populate port table with SAI ports
-        for (const auto &it : ports)
+        if (gMySwitchType != "voq")
         {
-            portTable.set(it.first, it.second);
-        }
+            // Get SAI default ports to populate DB
+            auto ports = ut_helper::getInitialSaiPorts();

-        // Set PortConfigDone, PortInitDone
-        portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
-        portTable.set("PortInitDone", { { "lanes", "0" } });
+            // Populate port table with SAI ports
+            for (const auto &it : ports)
+            {
+                portTable.set(it.first, it.second);
+            }

-        // refill consumer
-        gPortsOrch->addExistingData(&portTable);
+            // Set PortConfigDone, PortInitDone
+            portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
+            portTable.set("PortInitDone", { { "lanes", "0" } });

-        // Apply configuration :
-        // create ports
+            // refill consumer
+            gPortsOrch->addExistingData(&portTable);

-        static_cast<Orch *>(gPortsOrch)->doTask();
+            // Apply configuration :
+            // create ports

-        // Apply configuration
-        // ports
-        static_cast<Orch *>(gPortsOrch)->doTask();
+            static_cast<Orch *>(gPortsOrch)->doTask();

-        ASSERT_TRUE(gPortsOrch->allPortsReady());
+            // Apply configuration
+            // ports
+            static_cast<Orch *>(gPortsOrch)->doTask();

-        // No more tasks
-        vector<string> ts;
-        gPortsOrch->dumpPendingTasks(ts);
-        ASSERT_TRUE(ts.empty());
-        ts.clear();
+            ASSERT_TRUE(gPortsOrch->allPortsReady());

-        // Simulate storm drop handler started on Ethernet0 TC 3
-        Port port;
-        gPortsOrch->getPort("Ethernet0", port);
+            // No more tasks
+            vector<string> ts;
+            gPortsOrch->dumpPendingTasks(ts);
+            ASSERT_TRUE(ts.empty());
+            ts.clear();

-        auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
-        auto dropHandler = make_unique<PfcWdZeroBufferHandler>(port.m_port_id, port.m_queue_ids[3], 3, countersTable);
+            // Simulate storm drop handler started on Ethernet0 TC 3
+            Port port;
+            gPortsOrch->getPort("Ethernet0", port);

-        // Create test buffer pool
-        poolTable.set(
-            "egress_pool",
-            {
-                { "type", "egress" },
-                { "mode", "dynamic" },
-                { "size", "4200000" },
-            });
-        poolTable.set(
-            "ingress_pool",
-            {
-                { "type", "ingress" },
-                { "mode", "dynamic" },
-                { "size", "4200000" },
-            });
+            auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
+            auto dropHandler = make_unique<PfcWdZeroBufferHandler>(port.m_port_id, port.m_queue_ids[3], 3, countersTable);

-        // Create test buffer profile
-        profileTable.set("ingress_profile", { { "pool", "ingress_pool" },
-                                              { "xon", "14832" },
-                                              { "xoff", "14832" },
-                                              { "size", "35000" },
-                                              { "dynamic_th", "0" } });
-        profileTable.set("egress_profile", { { "pool", "egress_pool" },
-                                             { "size", "0" },
-                                             { "dynamic_th", "0" } });
-
-        // Apply profile on Queue and PGs 3-4 all ports
-        for (const auto &it : ports)
-        {
-            std::ostringstream oss;
-            oss << it.first << ":3-4";
-            pgTable.set(oss.str(), { { "profile", "ingress_profile" } });
-            queueTable.set(oss.str(), { {"profile", "egress_profile" } });
-        }
-        gBufferOrch->addExistingData(&pgTable);
-        gBufferOrch->addExistingData(&poolTable);
-        gBufferOrch->addExistingData(&profileTable);
-        gBufferOrch->addExistingData(&queueTable);
+            // Create test buffer pool
+            poolTable.set(
+                "egress_pool",
+                {
+                    { "type", "egress" },
+                    { "mode", "dynamic" },
+                    { "size", "4200000" },
+                });
+            poolTable.set(
+                "ingress_pool",
+                {
+                    { "type", "ingress" },
+                    { "mode", "dynamic" },
+                    { "size", "4200000" },
+                });
+
+            // Create test buffer profile
+            profileTable.set("ingress_profile", { { "pool", "ingress_pool" },
+                                                  { "xon", "14832" },
+                                                  { "xoff", "14832" },
+                                                  { "size", "35000" },
+                                                  { "dynamic_th", "0" } });
+            profileTable.set("egress_profile", { { "pool", "egress_pool" },
+                                                 { "size", "0" },
+                                                 { "dynamic_th", "0" } });
+
+            // Apply profile on Queue and PGs 3-4 all ports
+            for (const auto &it : ports)
+            {
+                std::ostringstream oss;
+                oss << it.first << ":3-4";
+                pgTable.set(oss.str(), { { "profile", "ingress_profile" } });
+                queueTable.set(oss.str(), { {"profile", "egress_profile" } });
+            }
+            gBufferOrch->addExistingData(&pgTable);
+            gBufferOrch->addExistingData(&poolTable);
+            gBufferOrch->addExistingData(&profileTable);
+            gBufferOrch->addExistingData(&queueTable);

-        // process pool, profile and Q's
-        static_cast<Orch *>(gBufferOrch)->doTask();
+            // process pool, profile and Q's
+            static_cast<Orch *>(gBufferOrch)->doTask();

-        auto queueConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME));
-        queueConsumer->dumpPendingTasks(ts);
-        ASSERT_FALSE(ts.empty()); // Queue is skipped
-        ts.clear();
+            auto queueConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME));
+            queueConsumer->dumpPendingTasks(ts);
+            ASSERT_FALSE(ts.empty()); // Queue is skipped
+            ts.clear();

-        auto pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME));
-        pgConsumer->dumpPendingTasks(ts);
-        ASSERT_TRUE(ts.empty()); // PG Notification is not skipped
-        ts.clear();
+            auto pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME));
+            pgConsumer->dumpPendingTasks(ts);
+            ASSERT_TRUE(ts.empty()); // PG Notification is not skipped
+            ts.clear();

-        // release zero buffer drop handler
-        dropHandler.reset();
+            // release zero buffer drop handler
+            dropHandler.reset();

-        // process queue
-        static_cast<Orch *>(gBufferOrch)->doTask();
+            // process queue
+            static_cast<Orch *>(gBufferOrch)->doTask();

-        queueConsumer->dumpPendingTasks(ts);
-        ASSERT_TRUE(ts.empty()); // queue should be processed now
-        ts.clear();
+            queueConsumer->dumpPendingTasks(ts);
+            ASSERT_TRUE(ts.empty()); // queue should be processed now
+            ts.clear();
+        }
     }
-    */


     /* This test checks that a LAG member validation happens on orchagent level
      * and no SAI call is executed in case a port requested to be a LAG member