Skip to content

Commit 08e7bd3

Browse files
Merge branch 'master' of github.com:sonic-net/sonic-swss into dash-pl
2 parents 0c26e58 + 465391d commit 08e7bd3

File tree

209 files changed

+15562
-2746
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

209 files changed

+15562
-2746
lines changed

.azure-pipelines/build-docker-sonic-vs-template.yml

+1
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ jobs:
9090
artifact: ${{ parameters.sairedis_artifact_name }}
9191
runVersion: 'latestFromBranch'
9292
runBranch: 'refs/heads/${{ parameters.sairedis_artifact_branch }}'
93+
allowPartiallySucceededBuilds: true
9394
path: $(Build.ArtifactStagingDirectory)/download/sairedis
9495
patterns: |
9596
${{ parameters.sairedis_artifact_pattern }}/libsaivs_*.deb

.azure-pipelines/build-template.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ parameters:
1010
- name: pool
1111
type: string
1212
values:
13-
- sonicbld
13+
- sonicbld-1es
1414
- sonicbld-armhf
1515
- sonicbld-arm64
1616
- default

.azure-pipelines/build_and_install_module.sh

+30-32
Original file line numberDiff line numberDiff line change
@@ -26,62 +26,60 @@ function build_and_install_kmodule()
2626
SUBLEVEL=$(echo $KERNEL_MAINVERSION | cut -d. -f3)
2727

2828
# Install the required debian packages to build the kernel modules
29+
apt-get update
2930
apt-get install -y build-essential linux-headers-${KERNEL_RELEASE} autoconf pkg-config fakeroot
30-
apt-get install -y flex bison libssl-dev libelf-dev
31+
apt-get install -y flex bison libssl-dev libelf-dev dwarves
3132
apt-get install -y libnl-route-3-200 libnl-route-3-dev libnl-cli-3-200 libnl-cli-3-dev libnl-3-dev
3233

3334
# Add the apt source mirrors and download the linux image source code
3435
cp /etc/apt/sources.list /etc/apt/sources.list.bk
3536
sed -i "s/^# deb-src/deb-src/g" /etc/apt/sources.list
3637
apt-get update
37-
apt-get source linux-image-unsigned-$(uname -r) > source.log
38+
KERNEL_PACKAGE_SOURCE=$(apt-cache show linux-image-unsigned-${KERNEL_RELEASE} | grep ^Source: | cut -d':' -f 2)
39+
KERNEL_PACKAGE_VERSION=$(apt-cache show linux-image-unsigned-${KERNEL_RELEASE} | grep ^Version: | cut -d':' -f 2)
40+
SOURCE_PACKAGE_VERSION=$(apt-cache showsrc ${KERNEL_PACKAGE_SOURCE} | grep ^Version: | cut -d':' -f 2)
41+
if [ ${KERNEL_PACKAGE_VERSION} != ${SOURCE_PACKAGE_VERSION} ]; then
42+
echo "WARNING: the running kernel version (${KERNEL_PACKAGE_VERSION}) doesn't match the source package " \
43+
"version (${SOURCE_PACKAGE_VERSION}) being downloaded. There's no guarantee the module being downloaded " \
44+
"can be loaded into the kernel or function correctly. If possible, please update your kernel and reboot " \
45+
"your system so that it's running the matching kernel version." >&2
46+
echo "Continuing with the build anyways" >&2
47+
fi
48+
apt-get source linux-image-unsigned-${KERNEL_RELEASE} > source.log
3849

3950
# Recover the original apt sources list
4051
cp /etc/apt/sources.list.bk /etc/apt/sources.list
4152
apt-get update
4253

4354
# Build the Linux kernel module drivers/net/team and vrf
4455
cd $(find . -maxdepth 1 -type d | grep -v "^.$")
56+
if [ -e debian/debian.env ]; then
57+
source debian/debian.env
58+
if [ -n "${DEBIAN}" -a -e ${DEBIAN}/reconstruct ]; then
59+
bash ${DEBIAN}/reconstruct
60+
fi
61+
fi
4562
make allmodconfig
4663
mv .config .config.bk
4764
cp /boot/config-$(uname -r) .config
4865
grep NET_TEAM .config.bk >> .config
49-
echo CONFIG_NET_VRF=m >> .config
50-
echo CONFIG_MACSEC=m >> .config
51-
echo CONFIG_NET_VENDOR_MICROSOFT=y >> .config
52-
echo CONFIG_MICROSOFT_MANA=m >> .config
53-
echo CONFIG_SYSTEM_REVOCATION_LIST=n >> .config
5466
make VERSION=$VERSION PATCHLEVEL=$PATCHLEVEL SUBLEVEL=$SUBLEVEL EXTRAVERSION=-${EXTRAVERSION} LOCALVERSION=-${LOCALVERSION} modules_prepare
55-
make M=drivers/net/team
67+
cp /usr/src/linux-headers-$(uname -r)/Module.symvers .
68+
make -j$(nproc) M=drivers/net/team
5669
mv drivers/net/Makefile drivers/net/Makefile.bak
5770
echo 'obj-$(CONFIG_NET_VRF) += vrf.o' > drivers/net/Makefile
5871
echo 'obj-$(CONFIG_MACSEC) += macsec.o' >> drivers/net/Makefile
59-
make M=drivers/net
72+
make -j$(nproc) M=drivers/net
6073

6174
# Install the module
62-
TEAM_DIR=$(echo /lib/modules/$(uname -r)/kernel/net/team)
63-
NET_DIR=$(echo /lib/modules/$(uname -r)/kernel/net)
64-
if [ ! -e "$TEAM_DIR/team.ko" ]; then
65-
mkdir -p $TEAM_DIR
66-
cp drivers/net/team/*.ko $TEAM_DIR/
67-
modinfo $TEAM_DIR/team.ko
68-
depmod
69-
modprobe team
70-
fi
71-
if [ ! -e "$NET_DIR/vrf.ko" ]; then
72-
mkdir -p $NET_DIR
73-
cp drivers/net/vrf.ko $NET_DIR/
74-
modinfo $NET_DIR/vrf.ko
75-
depmod
76-
modprobe vrf
77-
fi
78-
if [ ! -e "$NET_DIR/macsec.ko" ]; then
79-
mkdir -p $NET_DIR
80-
cp drivers/net/macsec.ko $NET_DIR/
81-
modinfo $NET_DIR/macsec.ko
82-
depmod
83-
modprobe macsec
84-
fi
75+
SONIC_MODULES_DIR=/lib/modules/$(uname -r)/updates/sonic
76+
mkdir -p $SONIC_MODULES_DIR
77+
cp drivers/net/team/*.ko drivers/net/vrf.ko drivers/net/macsec.ko $SONIC_MODULES_DIR/
78+
depmod
79+
modinfo team vrf macsec
80+
modprobe team
81+
modprobe vrf
82+
modprobe macsec
8583

8684
cd /tmp
8785
rm -rf $WORKDIR

.azure-pipelines/gcov.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ parameters:
88
- name: pool
99
type: string
1010
values:
11-
- sonicbld
11+
- sonicbld-1es
1212
- default
1313
default: default
1414

.azure-pipelines/test-docker-sonic-vs-template.yml

+4-3
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ jobs:
5050
DIFF_COVER_ENABLE: 'true'
5151
DIFF_COVER_COVERAGE_FILES: Cobertura.xml
5252

53-
pool: sonic-common-test
53+
pool: sonictest
5454

5555
steps:
5656
- script: |
@@ -91,7 +91,8 @@ jobs:
9191
sudo apt-add-repository https://packages.microsoft.com/ubuntu/20.04/prod
9292
sudo apt-get update
9393
sudo apt-get install -y dotnet-sdk-7.0
94-
sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin
94+
sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin 2>&1 | tee log.log || grep 'already installed' log.log
95+
rm log.log
9596
displayName: "Install .NET CORE"
9697
9798
- script: |
@@ -107,7 +108,7 @@ jobs:
107108
# install packages for vs test
108109
sudo apt-get install -y net-tools bridge-utils vlan
109110
sudo apt-get install -y python3-pip
110-
sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker>=4.4.1 redis==3.3.4 flaky==3.7.0
111+
sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker>=4.4.1 redis==3.3.4 flaky==3.7.0 requests==2.31.0
111112
sudo pip3 install lcov_cobertura
112113
displayName: "Install dependencies"
113114

azure-pipelines.yml

+41-2
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ stages:
4141
- template: .azure-pipelines/build-template.yml
4242
parameters:
4343
arch: amd64
44-
pool: sonicbld
44+
pool: sonicbld-1es
4545
sonic_slave: sonic-slave-bullseye
4646
common_lib_artifact_name: common-lib
4747
swss_common_artifact_name: sonic-swss-common
@@ -56,7 +56,7 @@ stages:
5656
- template: .azure-pipelines/build-template.yml
5757
parameters:
5858
arch: amd64
59-
pool: sonicbld
59+
pool: sonicbld-1es
6060
sonic_slave: sonic-slave-bullseye
6161
common_lib_artifact_name: common-lib
6262
swss_common_artifact_name: sonic-swss-common
@@ -92,6 +92,45 @@ stages:
9292
artifact_name: sonic-swss.arm64
9393
archive_gcov: false
9494

95+
- stage: BuildBookworm
96+
dependsOn: BuildArm
97+
condition: succeeded('BuildArm')
98+
jobs:
99+
- template: .azure-pipelines/build-template.yml
100+
parameters:
101+
arch: amd64
102+
pool: sonicbld-1es
103+
sonic_slave: sonic-slave-bookworm
104+
common_lib_artifact_name: common-lib
105+
swss_common_artifact_name: sonic-swss-common-bookworm
106+
sairedis_artifact_name: sonic-sairedis-bookworm
107+
artifact_name: sonic-swss-bookworm
108+
archive_gcov: false
109+
110+
- template: .azure-pipelines/build-template.yml
111+
parameters:
112+
arch: armhf
113+
timeout: 240
114+
pool: sonicbld-armhf
115+
sonic_slave: sonic-slave-bookworm-armhf
116+
common_lib_artifact_name: common-lib.armhf
117+
swss_common_artifact_name: sonic-swss-common-bookworm.armhf
118+
sairedis_artifact_name: sonic-sairedis-bookworm.armhf
119+
artifact_name: sonic-swss-bookworm.armhf
120+
archive_gcov: false
121+
122+
- template: .azure-pipelines/build-template.yml
123+
parameters:
124+
arch: arm64
125+
timeout: 240
126+
pool: sonicbld-arm64
127+
sonic_slave: sonic-slave-bookworm-arm64
128+
common_lib_artifact_name: common-lib.arm64
129+
swss_common_artifact_name: sonic-swss-common-bookworm.arm64
130+
sairedis_artifact_name: sonic-sairedis-bookworm.arm64
131+
artifact_name: sonic-swss-bookworm.arm64
132+
archive_gcov: false
133+
95134
- stage: BuildDocker
96135
dependsOn: Build
97136
condition: succeeded('Build')

cfgmgr/Makefile.am

+1-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ CFLAGS_SAI = -I /usr/include/sai
33
LIBNL_CFLAGS = -I/usr/include/libnl3
44
LIBNL_LIBS = -lnl-genl-3 -lnl-route-3 -lnl-3
55
SAIMETA_LIBS = -lsaimeta -lsaimetadata -lzmq
6-
COMMON_LIBS = -lswsscommon
6+
COMMON_LIBS = -lswsscommon -lpthread
77

88
bin_PROGRAMS = vlanmgrd teammgrd portmgrd intfmgrd buffermgrd vrfmgrd nbrmgrd vxlanmgrd sflowmgrd natmgrd coppmgrd tunnelmgrd macsecmgrd fabricmgrd
99

cfgmgr/buffer_pool_mellanox.lua

+28-8
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,7 @@ local function iterate_profile_list(all_items)
133133
return 0
134134
end
135135

136-
local function fetch_buffer_pool_size_from_appldb()
136+
local function fetch_buffer_pool_size_from_appldb(shp_enabled)
137137
local buffer_pools = {}
138138
redis.call('SELECT', config_db)
139139
local buffer_pool_keys = redis.call('KEYS', 'BUFFER_POOL|*')
@@ -158,7 +158,18 @@ local function fetch_buffer_pool_size_from_appldb()
158158
end
159159
xoff = redis.call('HGET', 'BUFFER_POOL_TABLE:' .. buffer_pools[i], 'xoff')
160160
if not xoff then
161-
table.insert(result, buffer_pools[i] .. ':' .. size)
161+
if shp_enabled and size == "0" and buffer_pools[i] == "ingress_lossless_pool" then
162+
-- During initialization, if SHP is enabled
163+
-- 1. the buffer pool sizes, xoff have initialized to 0, which means the shared headroom pool is disabled
164+
-- 2. but the buffer profiles already indicate the shared headroom pool is enabled
165+
-- 3. later on the buffer pool sizes are updated with xoff being non-zero
166+
-- In case the orchagent starts handling buffer configuration between 2 and 3,
167+
-- It is inconsistent between buffer pools and profiles, which fails Mellanox SAI sanity check
168+
-- To avoid it, it indicates the shared headroom pool is enabled by setting a very small buffer pool and shared headroom pool sizes
169+
table.insert(result, buffer_pools[i] .. ':2048:1024')
170+
else
171+
table.insert(result, buffer_pools[i] .. ':' .. size)
172+
end
162173
else
163174
table.insert(result, buffer_pools[i] .. ':' .. size .. ':' .. xoff)
164175
end
@@ -295,7 +306,7 @@ local fail_count = 0
295306
fail_count = fail_count + iterate_all_items(all_pgs, true)
296307
fail_count = fail_count + iterate_all_items(all_tcs, false)
297308
if fail_count > 0 then
298-
fetch_buffer_pool_size_from_appldb()
309+
fetch_buffer_pool_size_from_appldb(shp_enabled)
299310
return result
300311
end
301312

@@ -305,7 +316,7 @@ local all_egress_profile_lists = redis.call('KEYS', 'BUFFER_PORT_EGRESS_PROFILE_
305316
fail_count = fail_count + iterate_profile_list(all_ingress_profile_lists)
306317
fail_count = fail_count + iterate_profile_list(all_egress_profile_lists)
307318
if fail_count > 0 then
308-
fetch_buffer_pool_size_from_appldb()
319+
fetch_buffer_pool_size_from_appldb(shp_enabled)
309320
return result
310321
end
311322

@@ -406,10 +417,12 @@ local pool_size
406417
if shp_size then
407418
accumulative_occupied_buffer = accumulative_occupied_buffer + shp_size
408419
end
420+
421+
local available_buffer = mmu_size - accumulative_occupied_buffer
409422
if ingress_pool_count == 1 then
410-
pool_size = mmu_size - accumulative_occupied_buffer
423+
pool_size = available_buffer
411424
else
412-
pool_size = (mmu_size - accumulative_occupied_buffer) / 2
425+
pool_size = available_buffer / 2
413426
end
414427

415428
if pool_size > ceiling_mmu_size then
@@ -418,12 +431,19 @@ end
418431

419432
local shp_deployed = false
420433
for i = 1, #pools_need_update, 1 do
434+
local percentage = tonumber(redis.call('HGET', pools_need_update[i], 'percentage'))
435+
local effective_pool_size
436+
if percentage ~= nil and percentage >= 0 then
437+
effective_pool_size = available_buffer * percentage / 100
438+
else
439+
effective_pool_size = pool_size
440+
end
421441
local pool_name = string.match(pools_need_update[i], "BUFFER_POOL|([^%s]+)$")
422442
if shp_size ~= 0 and pool_name == "ingress_lossless_pool" then
423-
table.insert(result, pool_name .. ":" .. math.ceil(pool_size) .. ":" .. math.ceil(shp_size))
443+
table.insert(result, pool_name .. ":" .. math.ceil(effective_pool_size) .. ":" .. math.ceil(shp_size))
424444
shp_deployed = true
425445
else
426-
table.insert(result, pool_name .. ":" .. math.ceil(pool_size))
446+
table.insert(result, pool_name .. ":" .. math.ceil(effective_pool_size))
427447
end
428448
end
429449

cfgmgr/buffermgr.cpp

+17-18
Original file line numberDiff line numberDiff line change
@@ -549,24 +549,23 @@ void BufferMgr::doTask(Consumer &consumer)
549549
task_status = doSpeedUpdateTask(port);
550550
}
551551
}
552-
553-
switch (task_status)
554-
{
555-
case task_process_status::task_failed:
556-
SWSS_LOG_ERROR("Failed to process table update");
557-
return;
558-
case task_process_status::task_need_retry:
559-
SWSS_LOG_INFO("Unable to process table update. Will retry...");
560-
++it;
561-
break;
562-
case task_process_status::task_invalid_entry:
563-
SWSS_LOG_ERROR("Failed to process invalid entry, drop it");
564-
it = consumer.m_toSync.erase(it);
565-
break;
566-
default:
567-
it = consumer.m_toSync.erase(it);
568-
break;
569-
}
552+
}
553+
switch (task_status)
554+
{
555+
case task_process_status::task_failed:
556+
SWSS_LOG_ERROR("Failed to process table update");
557+
return;
558+
case task_process_status::task_need_retry:
559+
SWSS_LOG_INFO("Unable to process table update. Will retry...");
560+
++it;
561+
break;
562+
case task_process_status::task_invalid_entry:
563+
SWSS_LOG_ERROR("Failed to process invalid entry, drop it");
564+
it = consumer.m_toSync.erase(it);
565+
break;
566+
default:
567+
it = consumer.m_toSync.erase(it);
568+
break;
570569
}
571570
}
572571
}

cfgmgr/buffermgrd.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ void dump_db_item(KeyOpFieldsValuesTuple &db_item)
4646

4747
void write_to_state_db(shared_ptr<vector<KeyOpFieldsValuesTuple>> db_items_ptr)
4848
{
49-
DBConnector db("STATE_DB", 0, true);
49+
DBConnector db("STATE_DB", 0);
5050
auto &db_items = *db_items_ptr;
5151
for (auto &db_item : db_items)
5252
{

cfgmgr/buffermgrdyn.cpp

+9-9
Original file line numberDiff line numberDiff line change
@@ -934,15 +934,6 @@ void BufferMgrDynamic::updateBufferObjectToDb(const string &key, const string &p
934934
void BufferMgrDynamic::updateBufferObjectListToDb(const string &key, const string &profileList, buffer_direction_t dir)
935935
{
936936
auto &table = m_applBufferProfileListTables[dir];
937-
const auto &direction = m_bufferDirectionNames[dir];
938-
939-
if (!m_bufferPoolReady)
940-
{
941-
SWSS_LOG_NOTICE("Buffer pools are not ready when configuring buffer %s profile list %s, pending", direction.c_str(), key.c_str());
942-
m_bufferObjectsPending = true;
943-
return;
944-
}
945-
946937
vector<FieldValueTuple> fvVector;
947938

948939
fvVector.emplace_back(buffer_profile_list_field_name, profileList);
@@ -3245,6 +3236,15 @@ task_process_status BufferMgrDynamic::handleSingleBufferPortProfileListEntry(con
32453236
}
32463237
}
32473238

3239+
if (!m_bufferPoolReady)
3240+
{
3241+
const auto &direction = m_bufferDirectionNames[dir];
3242+
3243+
SWSS_LOG_NOTICE("Buffer pools are not ready when configuring buffer %s profile list %s, pending", direction.c_str(), key.c_str());
3244+
m_bufferObjectsPending = true;
3245+
return task_process_status::task_success;
3246+
}
3247+
32483248
auto &portInfo = m_portInfoLookup[port];
32493249
if (PORT_ADMIN_DOWN != portInfo.state)
32503250
{

0 commit comments

Comments
 (0)