@@ -449,6 +449,7 @@ async def get_workspace_alerts_summary(workspace_name: str) -> v1_models.AlertSummary:
             malicious_packages=summary.total_packages_count,
             pii=summary.total_pii_count,
             secrets=summary.total_secrets_count,
+            total_alerts=summary.total_alerts,
         )
     except Exception:
         logger.exception("Error while getting alerts summary")
@@ -477,60 +478,75 @@ async def get_workspace_messages(
         raise HTTPException(status_code=500, detail="Internal server error")

     offset = (page - 1) * page_size
+    valid_conversations: List[v1_models.ConversationSummary] = []
+    fetched_prompts = 0
+
+    while len(valid_conversations) < page_size:
+        batch_size = page_size * 2  # Fetch more prompts to compensate for potential skips
+
+        prompts = await dbreader.get_prompts(
+            ws.id,
+            offset + fetched_prompts,
+            batch_size,
+            filter_by_ids,
+            list([AlertSeverity.CRITICAL.value]),  # TODO: Configurable severity
+            filter_by_alert_trigger_types,
+        )
+
+        # iterate for all prompts to compose the conversation summary
+        conversation_summaries: List[v1_models.ConversationSummary] = []
+        for prompt in prompts:
+            fetched_prompts += 1
+            if not prompt.request:
+                logger.warning(f"Skipping prompt {prompt.id}. Empty request field")
+                continue
+
+            messages, _ = await v1_processing.parse_request(prompt.request)
+            if not messages or len(messages) == 0:
+                logger.warning(f"Skipping prompt {prompt.id}. No messages found")
+                continue
+
+            # message is just the first entry in the request
+            message_obj = v1_models.ChatMessage(
+                message=messages[0], timestamp=prompt.timestamp, message_id=prompt.id
+            )
+
+            # count total alerts for the prompt
+            total_alerts_row = await dbreader.get_alerts_summary(prompt_id=prompt.id)
+
+            # get token usage for the prompt
+            prompts_outputs = await dbreader.get_prompts_with_output(prompt_id=prompt.id)
+            ws_token_usage = await v1_processing.parse_workspace_token_usage(prompts_outputs)
+
+            conversation_summary = v1_models.ConversationSummary(
+                chat_id=prompt.id,
+                prompt=message_obj,
+                provider=prompt.provider,
+                type=prompt.type,
+                conversation_timestamp=prompt.timestamp,
+                alerts_summary=v1_models.AlertSummary(
+                    malicious_packages=total_alerts_row.total_packages_count,
+                    pii=total_alerts_row.total_pii_count,
+                    secrets=total_alerts_row.total_secrets_count,
+                    total_alerts=total_alerts_row.total_alerts,
+                ),
+                total_alerts=total_alerts_row.total_alerts,
+                token_usage_agg=ws_token_usage,
+            )
+
+            valid_conversations.append(conversation_summary)
+            if len(valid_conversations) >= page_size:
+                break

-    prompts = await dbreader.get_prompts(
-        ws.id,
-        offset,
-        page_size,
-        filter_by_ids,
-        list([AlertSeverity.CRITICAL.value]),  # TODO: Configurable severity
-        filter_by_alert_trigger_types,
-    )
     # Fetch total message count
     total_count = await dbreader.get_total_messages_count_by_workspace_id(
         ws.id, AlertSeverity.CRITICAL.value
     )

-    # iterate for all prompts to compose the conversation summary
-    conversation_summaries: List[v1_models.ConversationSummary] = []
-    for prompt in prompts:
-        if not prompt.request:
-            logger.warning(f"Skipping prompt {prompt.id}. Empty request field")
-            continue
-
-        messages, _ = await v1_processing.parse_request(prompt.request)
-        if not messages or len(messages) == 0:
-            logger.warning(f"Skipping prompt {prompt.id}. No messages found")
-            continue
-
-        # message is just the first entry in the request
-        message_obj = v1_models.ChatMessage(
-            message=messages[0], timestamp=prompt.timestamp, message_id=prompt.id
-        )
-
-        # count total alerts for the prompt
-        total_alerts_row = await dbreader.get_alerts_summary(prompt_id=prompt.id)
-
-        # get token usage for the prompt
-        prompts_outputs = await dbreader.get_prompts_with_output(prompt_id=prompt.id)
-        ws_token_usage = await v1_processing.parse_workspace_token_usage(prompts_outputs)
-
-        conversation_summary = v1_models.ConversationSummary(
-            chat_id=prompt.id,
-            prompt=message_obj,
-            provider=prompt.provider,
-            type=prompt.type,
-            conversation_timestamp=prompt.timestamp,
-            total_alerts=total_alerts_row.total_alerts,
-            token_usage_agg=ws_token_usage,
-        )
-
-        conversation_summaries.append(conversation_summary)
-
     return v1_models.PaginatedMessagesResponse(
-        data=conversation_summaries,
+        data=valid_conversations,
         limit=page_size,
-        offset=(page - 1) * page_size,
+        offset=offset,
         total=total_count,
     )

@@ -543,7 +559,7 @@ async def get_workspace_messages(
 async def get_messages_by_prompt_id(
     workspace_name: str,
     prompt_id: str,
-) -> List[v1_models.Conversation]:
+) -> v1_models.Conversation:
     """Get messages for a workspace."""
     try:
         ws = await wscrud.get_workspace_by_name(workspace_name)
@@ -552,12 +568,13 @@ async def get_messages_by_prompt_id(
     except Exception:
         logger.exception("Error while getting workspace")
         raise HTTPException(status_code=500, detail="Internal server error")
-
     prompts_outputs = await dbreader.get_prompts_with_output(
         workspace_id=ws.id, prompt_id=prompt_id
     )
     conversations, _ = await v1_processing.parse_messages_in_conversations(prompts_outputs)
-    return conversations
+    if not conversations:
+        raise HTTPException(status_code=404, detail="Conversation not found")
+    return conversations[0]


 @v1.get(