 #include <sstream>
 
 extern redisReply *mockReply;
-extern string gMySwitchType;
 
 namespace portsorch_test
 {
@@ -725,110 +724,100 @@ namespace portsorch_test
         Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME);
         Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME);
 
-        if(gMySwitchType != "voq")
-        {
-            // Get SAI default ports to populate DB
-            auto ports = ut_helper::getInitialSaiPorts();
+        // Get SAI default ports to populate DB
+        auto ports = ut_helper::getInitialSaiPorts();
 
-            // Populate port table with SAI ports
-            for (const auto &it : ports)
-            {
-                portTable.set(it.first, it.second);
-            }
+        // Populate port table with SAI ports
+        for (const auto &it : ports)
+        {
+            portTable.set(it.first, it.second);
+        }
 
-            // Set PortConfigDone, PortInitDone
-            portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
-            portTable.set("PortInitDone", { { "lanes", "0" } });
+        // Set PortConfigDone, PortInitDone
+        portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
+        portTable.set("PortInitDone", { { "lanes", "0" } });
 
-            // refill consumer
-            gPortsOrch->addExistingData(&portTable);
+        // refill consumer
+        gPortsOrch->addExistingData(&portTable);
 
-            // Apply configuration :
-            // create ports
+        // Apply configuration :
+        // create ports
 
-            static_cast<Orch *>(gPortsOrch)->doTask();
+        static_cast<Orch *>(gPortsOrch)->doTask();
 
-            // Apply configuration
-            // ports
-            static_cast<Orch *>(gPortsOrch)->doTask();
+        // Apply configuration
+        // ports
+        static_cast<Orch *>(gPortsOrch)->doTask();
 
-            ASSERT_TRUE(gPortsOrch->allPortsReady());
+        ASSERT_TRUE(gPortsOrch->allPortsReady());
 
-            // No more tasks
-            vector<string> ts;
-            gPortsOrch->dumpPendingTasks(ts);
-            ASSERT_TRUE(ts.empty());
-            ts.clear();
+        // No more tasks
+        vector<string> ts;
+        gPortsOrch->dumpPendingTasks(ts);
+        ASSERT_TRUE(ts.empty());
+        ts.clear();
 
-            // Simulate storm drop handler started on Ethernet0 TC 3
-            Port port;
-            gPortsOrch->getPort("Ethernet0", port);
+        // Simulate storm drop handler started on Ethernet0 TC 3
+        Port port;
+        gPortsOrch->getPort("Ethernet0", port);
 
-            auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
-            auto dropHandler = make_unique<PfcWdZeroBufferHandler>(port.m_port_id, port.m_queue_ids[3], 3, countersTable);
+        auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
+        auto dropHandler = make_unique<PfcWdZeroBufferHandler>(port.m_port_id, port.m_queue_ids[3], 3, countersTable);
 
-            // Create test buffer pool
-            poolTable.set(
-                "egress_pool",
-                {
-                    { "type", "egress" },
-                    { "mode", "dynamic" },
-                    { "size", "4200000" },
-                });
-            poolTable.set(
-                "ingress_pool",
-                {
-                    { "type", "ingress" },
-                    { "mode", "dynamic" },
-                    { "size", "4200000" },
-                });
-
-            // Create test buffer profile
-            profileTable.set("ingress_profile", { { "pool", "ingress_pool" },
-                                                  { "xon", "14832" },
-                                                  { "xoff", "14832" },
-                                                  { "size", "35000" },
-                                                  { "dynamic_th", "0" } });
-            profileTable.set("egress_profile", { { "pool", "egress_pool" },
-                                                 { "size", "0" },
-                                                 { "dynamic_th", "0" } });
-
-            // Apply profile on Queue and PGs 3-4 all ports
-            for (const auto &it : ports)
+        // Create test buffer pool
+        poolTable.set(
+            "egress_pool",
             {
-                std::ostringstream oss;
-                oss << it.first << ":3-4";
-                pgTable.set(oss.str(), { { "profile", "ingress_profile" } });
-                queueTable.set(oss.str(), { {"profile", "egress_profile" } });
-            }
-            gBufferOrch->addExistingData(&pgTable);
-            gBufferOrch->addExistingData(&poolTable);
-            gBufferOrch->addExistingData(&profileTable);
-            gBufferOrch->addExistingData(&queueTable);
-
-            // process pool, profile and Q's
-            static_cast<Orch *>(gBufferOrch)->doTask();
+                { "type", "egress" },
+                { "mode", "dynamic" },
+                { "size", "4200000" },
+            });
+        poolTable.set(
+            "ingress_pool",
+            {
+                { "type", "ingress" },
+                { "mode", "dynamic" },
+                { "size", "4200000" },
+            });
 
-            auto queueConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME));
-            queueConsumer->dumpPendingTasks(ts);
-            ASSERT_FALSE(ts.empty()); // Queue is skipped
-            ts.clear();
+        // Create test buffer profile
+        profileTable.set("ingress_profile", { { "pool", "ingress_pool" },
+                                              { "xon", "14832" },
+                                              { "xoff", "14832" },
+                                              { "size", "35000" },
+                                              { "dynamic_th", "0" } });
+        profileTable.set("egress_profile", { { "pool", "egress_pool" },
+                                             { "size", "0" },
+                                             { "dynamic_th", "0" } });
+
+        // Apply profile on Queue and PGs 3-4 all ports
+        for (const auto &it : ports)
+        {
+            std::ostringstream oss;
+            oss << it.first << ":3-4";
+            pgTable.set(oss.str(), { { "profile", "ingress_profile" } });
+            queueTable.set(oss.str(), { {"profile", "egress_profile" } });
+        }
+        gBufferOrch->addExistingData(&pgTable);
+        gBufferOrch->addExistingData(&poolTable);
+        gBufferOrch->addExistingData(&profileTable);
+        gBufferOrch->addExistingData(&queueTable);
 
-            auto pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME));
-            pgConsumer->dumpPendingTasks(ts);
-            ASSERT_TRUE(ts.empty()); // PG Notification is not skipped
-            ts.clear();
+        // process pool, profile and Q's
+        static_cast<Orch *>(gBufferOrch)->doTask();
 
-            // release zero buffer drop handler
-            dropHandler.reset();
+        auto queueConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME));
+        queueConsumer->dumpPendingTasks(ts);
+        ASSERT_TRUE(ts.empty()); // Queue is not skipped
+        ts.clear();
 
-            // process queue
-            static_cast<Orch *>(gBufferOrch)->doTask();
+        auto pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME));
+        pgConsumer->dumpPendingTasks(ts);
+        ASSERT_TRUE(ts.empty()); // PG Notification is not skipped
+        ts.clear();
 
-            queueConsumer->dumpPendingTasks(ts);
-            ASSERT_TRUE(ts.empty()); // queue should be processed now
-            ts.clear();
-        }
+        // release zero buffer drop handler
+        dropHandler.reset();
     }
 
     /* This test checks that a LAG member validation happens on orchagent level