@@ -5,34 +5,31 @@ local appl_db = "0"
local config_db = "4"
local state_db = "6"

- local lossypg_reserved = 19 * 1024
- local lossypg_reserved_400g = 37 * 1024
- -- Number of 400G ports
- local port_count_400g = 0
- -- Number of lossy PG on 400G ports
- local lossypg_400g = 0
+ -- Number of ports with 8 lanes (whose pipeline latency should be doubled)
+ local port_count_8lanes = 0
+ -- Number of lossy PG on ports with 8 lanes
+ local lossypg_8lanes = 0
+
+ -- Private headroom
+ local private_headroom = 10 * 1024

local result = {}
local profiles = {}
+ local lossless_profiles = {}

local total_port = 0

local mgmt_pool_size = 256 * 1024
local egress_mirror_headroom = 10 * 1024

- local function find_profile(ref)
-     -- Remove the surrounding square brackets and then find in the list
-     local name = string.sub(ref, 2, -2)
-     for i = 1, #profiles, 1 do
-         if profiles[i][1] == name then
-             return i
-         end
-     end
-     return 0
- end
+ -- The set of ports with 8 lanes
+ local port_set_8lanes = {}
+ -- Number of ports with lossless profiles
+ local lossless_port_count = 0

- local function iterate_all_items(all_items)
+ local function iterate_all_items(all_items, check_lossless)
    table.sort(all_items)
+     local lossless_ports = {}
    local port
    local fvpairs
    for i = 1, #all_items, 1 do
@@ -43,9 +40,13 @@ local function iterate_all_items(all_items)
        port = string.match(all_items[i], "Ethernet%d+")
        if port ~= nil then
            local range = string.match(all_items[i], "Ethernet%d+:([^%s]+)$")
-             local profile = redis.call('HGET', all_items[i], 'profile')
-             local index = find_profile(profile)
-             if index == 0 then
+             local profile_name = redis.call('HGET', all_items[i], 'profile')
+             if not profile_name then
+                 return 1
+             end
+             profile_name = string.sub(profile_name, 2, -2)
+             local profile_ref_count = profiles[profile_name]
+             if profile_ref_count == nil then
                -- Indicate an error in case the referenced profile hasn't been inserted or has been removed
                -- It's possible when the orchagent is busy
                -- The buffermgrd will take care of it and retry later
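Note on the lookup change above: `profiles` becomes a table keyed by profile name instead of a list of `{name, count}` pairs, so the per-item `find_profile` scan turns into a direct index after stripping the `[...]` around the reference. A minimal standalone sketch of the pattern (hypothetical `ref` value, not part of the script):

```lua
-- Illustration only: how the bracketed reference maps to a dictionary key.
local profiles = { ["BUFFER_PROFILE_TABLE:ingress_lossy_profile"] = 0 }

-- A PG stores its profile as "[BUFFER_PROFILE_TABLE:ingress_lossy_profile]";
-- string.sub(ref, 2, -2) strips the surrounding square brackets.
local ref = "[BUFFER_PROFILE_TABLE:ingress_lossy_profile]"
local profile_name = string.sub(ref, 2, -2)

-- Constant-time lookup replaces the former find_profile() loop over a list.
local profile_ref_count = profiles[profile_name]
assert(profile_ref_count == 0)
```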
@@ -57,13 +58,15 @@ local function iterate_all_items(all_items)
            else
                size = 1 + tonumber(string.sub(range, -1)) - tonumber(string.sub(range, 1, 1))
            end
-             profiles[index][2] = profiles[index][2] + size
-             local speed = redis.call('HGET', 'PORT_TABLE:' .. port, 'speed')
-             if speed == '400000' then
-                 if profile == '[BUFFER_PROFILE_TABLE:ingress_lossy_profile]' then
-                     lossypg_400g = lossypg_400g + size
+             profiles[profile_name] = profile_ref_count + size
+             if port_set_8lanes[port] and profile_name == 'BUFFER_PROFILE_TABLE:ingress_lossy_profile' then
+                 lossypg_8lanes = lossypg_8lanes + size
+             end
+             if check_lossless and lossless_profiles[profile_name] then
+                 if lossless_ports[port] == nil then
+                     lossless_port_count = lossless_port_count + 1
+                     lossless_ports[port] = true
                end
-                 port_count_400g = port_count_400g + 1
            end
        end
    end
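The `lossless_ports` table is used as a set so that a port is counted once even when it carries several lossless PGs. A standalone sketch of that counting pattern with made-up PG keys (the script itself derives the port via `string.match` on the APPL_DB key):

```lua
-- Count distinct ports, not PGs: Ethernet0 with PGs 3-4 and 6 is still one port.
local lossless_ports = {}
local lossless_port_count = 0
local pg_keys = { "BUFFER_PG_TABLE:Ethernet0:3-4", "BUFFER_PG_TABLE:Ethernet0:6", "BUFFER_PG_TABLE:Ethernet4:3-4" }
for _, key in ipairs(pg_keys) do
    local port = string.match(key, "Ethernet%d+")
    if lossless_ports[port] == nil then
        lossless_port_count = lossless_port_count + 1
        lossless_ports[port] = true
    end
end
assert(lossless_port_count == 2)
```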
@@ -77,6 +80,27 @@ local ports_table = redis.call('KEYS', 'PORT|*')

total_port = #ports_table

+ -- Initialize the port_set_8lanes set
+ local lanes
+ local number_of_lanes
+ local port
+ for i = 1, total_port, 1 do
+     -- Load lanes from PORT table
+     lanes = redis.call('HGET', ports_table[i], 'lanes')
+     if lanes then
+         local _
+         _, number_of_lanes = string.gsub(lanes, ",", ",")
+         number_of_lanes = number_of_lanes + 1
+         port = string.sub(ports_table[i], 6, -1)
+         if (number_of_lanes == 8) then
+             port_set_8lanes[port] = true
+             port_count_8lanes = port_count_8lanes + 1
+         else
+             port_set_8lanes[port] = false
+         end
+     end
+ end
+

local egress_lossless_pool_size = redis.call('HGET', 'BUFFER_POOL|egress_lossless_pool', 'size')

-- Whether shared headroom pool is enabled?
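The lane count comes from counting commas in the `lanes` field: `string.gsub` returns the number of substitutions as its second value, so commas plus one gives the number of lanes, and `string.sub(key, 6, -1)` drops the `PORT|` prefix of the CONFIG_DB key. A standalone sketch with a made-up port entry:

```lua
-- Hypothetical CONFIG_DB key and lanes value, for illustration only.
local key = "PORT|Ethernet0"
local lanes = "0,1,2,3,4,5,6,7"

-- string.gsub returns (new_string, replacement_count); commas + 1 = lane count.
local _, number_of_lanes = string.gsub(lanes, ",", ",")
number_of_lanes = number_of_lanes + 1

-- Strip the 5-character "PORT|" prefix to get the port name.
local port = string.sub(key, 6, -1)
assert(number_of_lanes == 8 and port == "Ethernet0")
```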
@@ -97,22 +121,45 @@ else
    shp_size = 0
end

+ -- Fetch mmu_size
+ redis.call('SELECT', state_db)
+ local mmu_size = tonumber(redis.call('HGET', 'BUFFER_MAX_PARAM_TABLE|global', 'mmu_size'))
+ if mmu_size == nil then
+     mmu_size = tonumber(egress_lossless_pool_size)
+ end
+ local asic_keys = redis.call('KEYS', 'ASIC_TABLE*')
+ local cell_size = tonumber(redis.call('HGET', asic_keys[1], 'cell_size'))
+ local pipeline_latency = tonumber(redis.call('HGET', asic_keys[1], 'pipeline_latency'))
+
+ local lossypg_reserved = pipeline_latency * 1024
+ local lossypg_reserved_8lanes = (2 * pipeline_latency - 1) * 1024
+
+ -- Align mmu_size at cell size boundary, otherwise the sdk will complain and the syncd will fail
+ local number_of_cells = math.floor(mmu_size / cell_size)
+ local ceiling_mmu_size = number_of_cells * cell_size
+
-- Switch to APPL_DB
redis.call('SELECT', appl_db)

-- Fetch names of all profiles and insert them into the look up table
local all_profiles = redis.call('KEYS', 'BUFFER_PROFILE*')
for i = 1, #all_profiles, 1 do
-     table.insert(profiles, {all_profiles[i], 0})
+     if all_profiles[i] ~= "BUFFER_PROFILE_TABLE_KEY_SET" and all_profiles[i] ~= "BUFFER_PROFILE_TABLE_DEL_SET" then
+         local xoff = redis.call('HGET', all_profiles[i], 'xoff')
+         if xoff then
+             lossless_profiles[all_profiles[i]] = true
+         end
+         profiles[all_profiles[i]] = 0
+     end
end

-- Fetch all the PGs
local all_pgs = redis.call('KEYS', 'BUFFER_PG*')
local all_tcs = redis.call('KEYS', 'BUFFER_QUEUE*')

local fail_count = 0
- fail_count = fail_count + iterate_all_items(all_pgs)
- fail_count = fail_count + iterate_all_items(all_tcs)
+ fail_count = fail_count + iterate_all_items(all_pgs, true)
+ fail_count = fail_count + iterate_all_items(all_tcs, false)
if fail_count > 0 then
    return {}
end
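With this hunk the lossy-PG reservations are derived from the ASIC's `pipeline_latency` (in KB) rather than the old 19 KB / 37 KB constants, and `mmu_size` is rounded down to a whole number of cells before any pool math. A worked example under assumed values (numbers chosen for illustration, not taken from a real ASIC_TABLE entry):

```lua
-- Assumed ASIC parameters: 19 KB pipeline latency, 208-byte cells.
local pipeline_latency = 19
local cell_size = 208
local mmu_size = 13619136          -- hypothetical, in bytes

local lossypg_reserved = pipeline_latency * 1024                   -- 19456, matches the old 19 * 1024
local lossypg_reserved_8lanes = (2 * pipeline_latency - 1) * 1024  -- 37888, matches the old 37 * 1024

-- Round mmu_size down to a multiple of cell_size.
local number_of_cells = math.floor(mmu_size / cell_size)           -- 65476
local ceiling_mmu_size = number_of_cells * cell_size               -- 13619008
assert(ceiling_mmu_size % cell_size == 0 and ceiling_mmu_size <= mmu_size)
```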
@@ -122,56 +169,55 @@ local statistics = {}
-- Fetch sizes of all of the profiles, accumulate them
local accumulative_occupied_buffer = 0
local accumulative_xoff = 0
- for i = 1, #profiles, 1 do
-     if profiles[i][1] ~= "BUFFER_PROFILE_TABLE_KEY_SET" and profiles[i][1] ~= "BUFFER_PROFILE_TABLE_DEL_SET" then
-         local size = tonumber(redis.call('HGET', profiles[i][1], 'size'))
+
+ for name in pairs(profiles) do
+     if name ~= "BUFFER_PROFILE_TABLE_KEY_SET" and name ~= "BUFFER_PROFILE_TABLE_DEL_SET" then
+         local size = tonumber(redis.call('HGET', name, 'size'))
        if size ~= nil then
-             if profiles[i][1] == "BUFFER_PROFILE_TABLE:ingress_lossy_profile" then
+             if name == "BUFFER_PROFILE_TABLE:ingress_lossy_profile" then
                size = size + lossypg_reserved
            end
-             if profiles[i][1] == "BUFFER_PROFILE_TABLE:egress_lossy_profile" then
-                 profiles[i][2] = total_port
+             if name == "BUFFER_PROFILE_TABLE:egress_lossy_profile" then
+                 profiles[name] = total_port
            end
            if size ~= 0 then
                if shp_enabled and shp_size == 0 then
-                     local xon = tonumber(redis.call('HGET', profiles[i][1], 'xon'))
-                     local xoff = tonumber(redis.call('HGET', profiles[i][1], 'xoff'))
+                     local xon = tonumber(redis.call('HGET', name, 'xon'))
+                     local xoff = tonumber(redis.call('HGET', name, 'xoff'))
                    if xon ~= nil and xoff ~= nil and xon + xoff > size then
-                         accumulative_xoff = accumulative_xoff + (xon + xoff - size) * profiles[i][2]
+                         accumulative_xoff = accumulative_xoff + (xon + xoff - size) * profiles[name]
                    end
                end
-                 accumulative_occupied_buffer = accumulative_occupied_buffer + size * profiles[i][2]
+                 accumulative_occupied_buffer = accumulative_occupied_buffer + size * profiles[name]
            end
-             table.insert(statistics, {profiles[i][1], size, profiles[i][2]})
+             table.insert(statistics, {name, size, profiles[name]})
        end
    end
end

- -- Extra lossy xon buffer for 400G port
- local lossypg_extra_for_400g = (lossypg_reserved_400g - lossypg_reserved) * lossypg_400g
- accumulative_occupied_buffer = accumulative_occupied_buffer + lossypg_extra_for_400g
+ -- Extra lossy xon buffer for ports with 8 lanes
+ local lossypg_extra_for_8lanes = (lossypg_reserved_8lanes - lossypg_reserved) * lossypg_8lanes
+ accumulative_occupied_buffer = accumulative_occupied_buffer + lossypg_extra_for_8lanes
+
+ -- Accumulate sizes for private headrooms
+ local accumulative_private_headroom = 0
+ if shp_enabled then
+     accumulative_private_headroom = lossless_port_count * private_headroom
+     accumulative_occupied_buffer = accumulative_occupied_buffer + accumulative_private_headroom
+     accumulative_xoff = accumulative_xoff - accumulative_private_headroom
+     if accumulative_xoff < 0 then
+         accumulative_xoff = 0
+     end
+ end

-- Accumulate sizes for management PGs
- local accumulative_management_pg = (total_port - port_count_400g) * lossypg_reserved + port_count_400g * lossypg_reserved_400g
+ local accumulative_management_pg = (total_port - port_count_8lanes) * lossypg_reserved + port_count_8lanes * lossypg_reserved_8lanes
accumulative_occupied_buffer = accumulative_occupied_buffer + accumulative_management_pg

-- Accumulate sizes for egress mirror and management pool
local accumulative_egress_mirror_overhead = total_port * egress_mirror_headroom
accumulative_occupied_buffer = accumulative_occupied_buffer + accumulative_egress_mirror_overhead + mgmt_pool_size

- -- Fetch mmu_size
- redis.call('SELECT', state_db)
- local mmu_size = tonumber(redis.call('HGET', 'BUFFER_MAX_PARAM_TABLE|global', 'mmu_size'))
- if mmu_size == nil then
-     mmu_size = tonumber(egress_lossless_pool_size)
- end
- local asic_keys = redis.call('KEYS', 'ASIC_TABLE*')
- local cell_size = tonumber(redis.call('HGET', asic_keys[1], 'cell_size'))
-
- -- Align mmu_size at cell size boundary, otherwise the sdk will complain and the syncd will fail
- local number_of_cells = math.floor(mmu_size / cell_size)
- local ceiling_mmu_size = number_of_cells * cell_size
-
-- Switch to CONFIG_DB
redis.call('SELECT', config_db)

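The management-PG term now charges the doubled reservation only to the 8-lane ports; the same split feeds the extra lossy-xon term a few lines above. A small arithmetic sketch with assumed counts (purely illustrative):

```lua
-- Assumed inventory: 32 ports in total, 4 of them with 8 lanes.
local total_port, port_count_8lanes = 32, 4
local lossypg_reserved, lossypg_reserved_8lanes = 19 * 1024, 37 * 1024

-- One management PG per port; 8-lane ports reserve the larger size.
local accumulative_management_pg =
    (total_port - port_count_8lanes) * lossypg_reserved
    + port_count_8lanes * lossypg_reserved_8lanes
assert(accumulative_management_pg == 28 * 19456 + 4 * 37888)  -- 696320 bytes
```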
@@ -238,13 +284,16 @@ table.insert(result, "debug:accumulative size:" .. accumulative_occupied_buffer)
for i = 1, #statistics do
    table.insert(result, "debug:" .. statistics[i][1] .. ":" .. statistics[i][2] .. ":" .. statistics[i][3])
end
- table.insert(result, "debug:extra_400g:" .. (lossypg_reserved_400g - lossypg_reserved) .. ":" .. lossypg_400g .. ":" .. port_count_400g)
+ table.insert(result, "debug:extra_8lanes:" .. (lossypg_reserved_8lanes - lossypg_reserved) .. ":" .. lossypg_8lanes .. ":" .. port_count_8lanes)
table.insert(result, "debug:mgmt_pool:" .. mgmt_pool_size)
+ if shp_enabled then
+     table.insert(result, "debug:accumulative_private_headroom:" .. accumulative_private_headroom)
+     table.insert(result, "debug:accumulative xoff:" .. accumulative_xoff)
+ end
table.insert(result, "debug:accumulative_mgmt_pg:" .. accumulative_management_pg)
table.insert(result, "debug:egress_mirror:" .. accumulative_egress_mirror_overhead)
table.insert(result, "debug:shp_enabled:" .. tostring(shp_enabled))
table.insert(result, "debug:shp_size:" .. shp_size)
- table.insert(result, "debug:accumulative xoff:" .. accumulative_xoff)
- table.insert(result, "debug:total port:" .. total_port)
+ table.insert(result, "debug:total port:" .. total_port .. " ports with 8 lanes:" .. port_count_8lanes)

return result