 #include "printfrr.h"
 #include "frrscript.h"
 #include "frrdistance.h"
+#include "lib/termtable.h"
 
 #include "zebra/zebra_router.h"
 #include "zebra/connected.h"
@@ -273,6 +274,63 @@ static const char *subqueue2str(enum meta_queue_indexes index)
         return "Unknown";
 }
 
+/* Handler for 'show zebra metaq' */
+int zebra_show_metaq_counter(struct vty *vty, bool uj)
+{
+        struct meta_queue *mq = zrouter.mq;
+        struct ttable *tt = NULL;
+        char *table = NULL;
+        json_object *json = NULL;
+        json_object *json_table = NULL;
+
+        if (!mq)
+                return CMD_WARNING;
+
+        /* Create a table for subqueue details */
+        tt = ttable_new(&ttable_styles[TTSTYLE_ASCII]);
+        ttable_add_row(tt, "SubQ|Current|Max Size|Total");
+
+        /* Add rows for each subqueue */
+        for (uint8_t i = 0; i < MQ_SIZE; i++) {
+                ttable_add_row(tt, "%s|%u|%u|%u", subqueue2str(i), mq->subq[i]->count,
+                               mq->max_subq[i], mq->total_subq[i]);
+        }
+
+        /* For better spacing between the content and the separator */
+        tt->style.cell.rpad = 2;
+        tt->style.cell.lpad = 1;
+        ttable_restyle(tt);
+
+        if (uj) {
+                json = json_object_new_object();
+                /* Add MetaQ summary to the JSON object */
+                json_object_int_add(json, "currentSize", mq->size);
+                json_object_int_add(json, "maxSize", mq->max_metaq);
+                json_object_int_add(json, "total", mq->total_metaq);
+
+                /* Convert the table to JSON and add it to the main JSON object */
+                /* s = string, d = integer */
+                json_table = ttable_json(tt, "sddd");
+                json_object_object_add(json, "subqueues", json_table);
+                vty_json(vty, json);
+        } else {
+                vty_out(vty, "MetaQ Summary\n");
+                vty_out(vty, "Current Size\t: %u\n", mq->size);
+                vty_out(vty, "Max Size\t: %u\n", mq->max_metaq);
+                vty_out(vty, "Total\t\t: %u\n", mq->total_metaq);
+
+                /* Dump the table */
+                table = ttable_dump(tt, "\n");
+                vty_out(vty, "%s\n", table);
+                XFREE(MTYPE_TMP_TTABLE, table);
+        }
+
+        /* Clean up the table */
+        ttable_del(tt);
+
+        return CMD_SUCCESS;
+}
+
 printfrr_ext_autoreg_p("ZN", printfrr_zebra_node);
 static ssize_t printfrr_zebra_node(struct fbuf *buf, struct printfrr_eargs *ea,
                                    const void *ptr)
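
Note: the handler above also relies on counters (total_metaq, max_metaq, total_subq[], max_subq[]) whose declarations are not part of this excerpt; they are presumably added to struct meta_queue elsewhere in this change. A minimal sketch of the assumed shape follows; the location (zebra/rib.h) and the field types are guesses inferred from how the fields are used, not confirmed by this diff:

/* Sketch only -- assumed counter fields on struct meta_queue (zebra/rib.h?).
 * Field names come from this diff; types are inferred from the atomic
 * accessors and %u format specifiers used above. */
struct meta_queue {
        struct list *subq[MQ_SIZE];             /* one FIFO per sub-queue */
        uint32_t size;                          /* entries currently queued */
        _Atomic uint32_t max_metaq;             /* high-water mark of 'size' */
        _Atomic uint32_t total_metaq;           /* lifetime enqueue count */
        _Atomic uint32_t max_subq[MQ_SIZE];     /* per-sub-queue high-water marks */
        _Atomic uint32_t total_subq[MQ_SIZE];   /* per-sub-queue lifetime counts */
};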
@@ -3257,6 +3315,7 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
         struct route_node *rn = NULL;
         struct route_entry *re = NULL, *curr_re = NULL;
         uint8_t qindex = MQ_SIZE, curr_qindex = MQ_SIZE;
+        uint64_t curr, high;
 
         rn = (struct route_node *)data;
 
@@ -3300,6 +3359,15 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
         listnode_add(mq->subq[qindex], rn);
         route_lock_node(rn);
         mq->size++;
+        atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+        atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+        curr = listcount(mq->subq[qindex]);
+        high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+        if (curr > high)
+                atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+        high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+        if (mq->size > high)
+                atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
         if (IS_ZEBRA_DEBUG_RIB_DETAILED)
                 rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %s",
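
Note: the bookkeeping added above is repeated verbatim in each of the *_meta_queue_add() variants below (early label, NHG, EVPN, graceful-restart run, early route). Purely as an illustration of that shared pattern, and not something this change introduces, it could be expressed as one hypothetical helper:

/* Hypothetical helper, not part of this change: update enqueue statistics
 * for sub-queue 'qindex' after a node has been queued and mq->size has
 * already been incremented. */
static void meta_queue_count_enqueue(struct meta_queue *mq, uint8_t qindex)
{
        uint64_t curr, high;

        atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
        atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);

        /* Track the per-sub-queue high-water mark */
        curr = listcount(mq->subq[qindex]);
        high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
        if (curr > high)
                atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);

        /* Track the overall meta-queue high-water mark */
        high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
        if (mq->size > high)
                atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
}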
@@ -3310,8 +3378,21 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
 
 static int early_label_meta_queue_add(struct meta_queue *mq, void *data)
 {
+        uint64_t curr, high;
+
         listnode_add(mq->subq[META_QUEUE_EARLY_LABEL], data);
         mq->size++;
+        atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+        atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_LABEL], 1, memory_order_relaxed);
+        curr = listcount(mq->subq[META_QUEUE_EARLY_LABEL]);
+        high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], memory_order_relaxed);
+        if (curr > high)
+                atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], curr,
+                                      memory_order_relaxed);
+        high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+        if (mq->size > high)
+                atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
+
         return 0;
 }
 
@@ -3320,6 +3401,7 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
         struct nhg_ctx *ctx = NULL;
         uint8_t qindex = META_QUEUE_NHG;
         struct wq_nhg_wrapper *w;
+        uint64_t curr, high;
 
         ctx = (struct nhg_ctx *)data;
 
@@ -3333,6 +3415,15 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
 
         listnode_add(mq->subq[qindex], w);
         mq->size++;
+        atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+        atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+        curr = listcount(mq->subq[qindex]);
+        high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+        if (curr > high)
+                atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+        high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+        if (mq->size > high)
+                atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
         if (IS_ZEBRA_DEBUG_RIB_DETAILED)
                 zlog_debug("NHG Context id=%u queued into sub-queue %s",
@@ -3347,6 +3438,7 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
         struct nhg_hash_entry *nhe = NULL;
         uint8_t qindex = META_QUEUE_NHG;
         struct wq_nhg_wrapper *w;
+        uint64_t curr, high;
 
         nhe = (struct nhg_hash_entry *)data;
 
@@ -3361,6 +3453,15 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
 
         listnode_add(mq->subq[qindex], w);
         mq->size++;
+        atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+        atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+        curr = listcount(mq->subq[qindex]);
+        high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+        if (curr > high)
+                atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+        high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+        if (mq->size > high)
+                atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
         if (IS_ZEBRA_DEBUG_RIB_DETAILED)
                 zlog_debug("NHG id=%u queued into sub-queue %s", nhe->id,
@@ -3381,8 +3482,19 @@ static int rib_meta_queue_nhg_del(struct meta_queue *mq, void *data)
 
 static int rib_meta_queue_evpn_add(struct meta_queue *mq, void *data)
 {
+        uint64_t curr, high;
+
         listnode_add(mq->subq[META_QUEUE_EVPN], data);
         mq->size++;
+        atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+        atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EVPN], 1, memory_order_relaxed);
+        curr = listcount(mq->subq[META_QUEUE_EVPN]);
+        high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EVPN], memory_order_relaxed);
+        if (curr > high)
+                atomic_store_explicit(&mq->max_subq[META_QUEUE_EVPN], curr, memory_order_relaxed);
+        high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+        if (mq->size > high)
+                atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
         return 0;
 }
@@ -4222,8 +4334,19 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
 
 static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
 {
+        uint64_t curr, high;
+
         listnode_add(mq->subq[META_QUEUE_GR_RUN], data);
         mq->size++;
+        atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+        atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_GR_RUN], 1, memory_order_relaxed);
+        curr = listcount(mq->subq[META_QUEUE_GR_RUN]);
+        high = atomic_load_explicit(&mq->max_subq[META_QUEUE_GR_RUN], memory_order_relaxed);
+        if (curr > high)
+                atomic_store_explicit(&mq->max_subq[META_QUEUE_GR_RUN], curr, memory_order_relaxed);
+        high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+        if (mq->size > high)
+                atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
         if (IS_ZEBRA_DEBUG_RIB_DETAILED)
                 zlog_debug("Graceful Run adding");
@@ -4234,9 +4357,20 @@ static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
 static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
 {
         struct zebra_early_route *ere = data;
+        uint64_t curr, high;
 
         listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data);
         mq->size++;
+        atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+        atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_ROUTE], 1, memory_order_relaxed);
+        curr = listcount(mq->subq[META_QUEUE_EARLY_ROUTE]);
+        high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], memory_order_relaxed);
+        if (curr > high)
+                atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], curr,
+                                      memory_order_relaxed);
+        high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+        if (mq->size > high)
+                atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
         if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
                 struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
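
Note: for reference, the non-JSON branch of zebra_show_metaq_counter() prints the summary lines followed by the ttable rendered by ttable_dump(). The following is illustrative output only: the numbers are made up, the sub-queue names stand in for whatever subqueue2str() returns, and the exact table borders depend on the ASCII ttable style:

zebra# show zebra metaq
MetaQ Summary
Current Size    : 0
Max Size        : 25
Total           : 405
 SubQ                 Current   Max Size   Total
 <subqueue2str(0)>    0         12         180
 <subqueue2str(1)>    0         7          96
 ...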