@@ -449,6 +449,7 @@ async def get_workspace_alerts_summary(workspace_name: str) -> v1_models.AlertSummary:
            malicious_packages=summary.total_packages_count,
            pii=summary.total_pii_count,
            secrets=summary.total_secrets_count,
+            total_alerts=summary.total_alerts,
        )
    except Exception:
        logger.exception("Error while getting alerts summary")
@@ -477,60 +478,74 @@ async def get_workspace_messages(
        raise HTTPException(status_code=500, detail="Internal server error")

    offset = (page - 1) * page_size
+    valid_conversations: List[v1_models.ConversationSummary] = []
+    fetched_prompts = 0
+
+    while len(valid_conversations) < page_size:
+        batch_size = page_size * 2  # Fetch more prompts to compensate for potential skips
+
+        prompts = await dbreader.get_prompts(
+            ws.id,
+            offset + fetched_prompts,
+            batch_size,
+            filter_by_ids,
+            list([AlertSeverity.CRITICAL.value]),  # TODO: Configurable severity
+            filter_by_alert_trigger_types,
+        )
+
+        # iterate for all prompts to compose the conversation summary
+        for prompt in prompts:
+            fetched_prompts += 1
+            if not prompt.request:
+                logger.warning(f"Skipping prompt {prompt.id}. Empty request field")
+                continue
+
+            messages, _ = await v1_processing.parse_request(prompt.request)
+            if not messages or len(messages) == 0:
+                logger.warning(f"Skipping prompt {prompt.id}. No messages found")
+                continue
+
+            # message is just the first entry in the request
+            message_obj = v1_models.ChatMessage(
+                message=messages[0], timestamp=prompt.timestamp, message_id=prompt.id
+            )
+
+            # count total alerts for the prompt
+            total_alerts_row = await dbreader.get_alerts_summary(prompt_id=prompt.id)
+
+            # get token usage for the prompt
+            prompts_outputs = await dbreader.get_prompts_with_output(prompt_id=prompt.id)
+            ws_token_usage = await v1_processing.parse_workspace_token_usage(prompts_outputs)
+
+            conversation_summary = v1_models.ConversationSummary(
+                chat_id=prompt.id,
+                prompt=message_obj,
+                provider=prompt.provider,
+                type=prompt.type,
+                conversation_timestamp=prompt.timestamp,
+                alerts_summary=v1_models.AlertSummary(
+                    malicious_packages=total_alerts_row.total_packages_count,
+                    pii=total_alerts_row.total_pii_count,
+                    secrets=total_alerts_row.total_secrets_count,
+                    total_alerts=total_alerts_row.total_alerts,
+                ),
+                total_alerts=total_alerts_row.total_alerts,
+                token_usage_agg=ws_token_usage,
+            )
+
+            valid_conversations.append(conversation_summary)
+            if len(valid_conversations) >= page_size:
+                break

-    prompts = await dbreader.get_prompts(
-        ws.id,
-        offset,
-        page_size,
-        filter_by_ids,
-        list([AlertSeverity.CRITICAL.value]),  # TODO: Configurable severity
-        filter_by_alert_trigger_types,
-    )
    # Fetch total message count
    total_count = await dbreader.get_total_messages_count_by_workspace_id(
        ws.id, AlertSeverity.CRITICAL.value
    )

-    # iterate for all prompts to compose the conversation summary
-    conversation_summaries: List[v1_models.ConversationSummary] = []
-    for prompt in prompts:
-        if not prompt.request:
-            logger.warning(f"Skipping prompt {prompt.id}. Empty request field")
-            continue
-
-        messages, _ = await v1_processing.parse_request(prompt.request)
-        if not messages or len(messages) == 0:
-            logger.warning(f"Skipping prompt {prompt.id}. No messages found")
-            continue
-
-        # message is just the first entry in the request
-        message_obj = v1_models.ChatMessage(
-            message=messages[0], timestamp=prompt.timestamp, message_id=prompt.id
-        )
-
-        # count total alerts for the prompt
-        total_alerts_row = await dbreader.get_alerts_summary(prompt_id=prompt.id)
-
-        # get token usage for the prompt
-        prompts_outputs = await dbreader.get_prompts_with_output(prompt_id=prompt.id)
-        ws_token_usage = await v1_processing.parse_workspace_token_usage(prompts_outputs)
-
-        conversation_summary = v1_models.ConversationSummary(
-            chat_id=prompt.id,
-            prompt=message_obj,
-            provider=prompt.provider,
-            type=prompt.type,
-            conversation_timestamp=prompt.timestamp,
-            total_alerts=total_alerts_row.total_alerts,
-            token_usage_agg=ws_token_usage,
-        )
-
-        conversation_summaries.append(conversation_summary)
-
    return v1_models.PaginatedMessagesResponse(
-        data=conversation_summaries,
+        data=valid_conversations,
        limit=page_size,
-        offset=(page - 1) * page_size,
+        offset=offset,
        total=total_count,
    )

@@ -543,7 +558,7 @@ async def get_workspace_messages(
async def get_messages_by_prompt_id(
    workspace_name: str,
    prompt_id: str,
-) -> List[v1_models.Conversation]:
+) -> v1_models.Conversation:
    """Get messages for a workspace."""
    try:
        ws = await wscrud.get_workspace_by_name(workspace_name)
@@ -552,12 +567,13 @@ async def get_messages_by_prompt_id(
    except Exception:
        logger.exception("Error while getting workspace")
        raise HTTPException(status_code=500, detail="Internal server error")
-
    prompts_outputs = await dbreader.get_prompts_with_output(
        workspace_id=ws.id, prompt_id=prompt_id
    )
    conversations, _ = await v1_processing.parse_messages_in_conversations(prompts_outputs)
-    return conversations
+    if not conversations:
+        raise HTTPException(status_code=404, detail="Conversation not found")
+    return conversations[0]


@v1.get(
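Below is a minimal, self-contained sketch of the skip-and-refill pagination pattern the reworked get_workspace_messages loop uses: over-fetch a batch (twice the page size), drop rows that fail validation, and keep fetching until a full page of valid items is collected, advancing the cursor by every row examined. The names fetch_batch and is_valid are hypothetical stand-ins for the dbreader query and the request-parsing checks, the sketch is synchronous for brevity, and the explicit empty-batch guard is an extra safeguard added only in the sketch so the loop stops once the source is exhausted.

from typing import Callable, List, TypeVar

T = TypeVar("T")


def paginate_with_skips(
    fetch_batch: Callable[[int, int], List[T]],  # hypothetical: (offset, limit) -> rows
    is_valid: Callable[[T], bool],               # hypothetical: stands in for the parse/skip checks
    page: int,
    page_size: int,
) -> List[T]:
    """Collect up to page_size valid rows, over-fetching to compensate for skipped rows."""
    offset = (page - 1) * page_size
    valid: List[T] = []
    fetched = 0

    while len(valid) < page_size:
        # Over-fetch, mirroring the handler's batch_size = page_size * 2
        batch = fetch_batch(offset + fetched, page_size * 2)
        if not batch:
            break  # sketch-only guard: stop once the source is exhausted

        for row in batch:
            fetched += 1  # advance the cursor past every row examined, valid or not
            if not is_valid(row):
                continue
            valid.append(row)
            if len(valid) >= page_size:
                break

    return valid


# Usage: rows 0..49 where odd rows stand in for unparseable prompts; page 2, page size 5
rows = list(range(50))
print(paginate_with_skips(lambda off, lim: rows[off:off + lim], lambda r: r % 2 == 0, page=2, page_size=5))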