@@ -38,7 +38,7 @@ def create_test_event(
     event = Event()
     event._message = message
     event.timestamp = timestamp if timestamp else datetime.now()
-    if id:
+    if id is not None:
         event._id = id
     event._source = EventSource.USER
     return event
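A note on the guard change above: in Python, 0 is falsy, so the old check (if id:) would silently skip an explicitly passed id=0, while (if id is not None:) only skips the default. A minimal sketch of the difference, using a hypothetical variable rather than the test code:

    event_id = 0
    if event_id:                # not entered: 0 is falsy
        print('truthiness check would assign')
    if event_id is not None:    # entered: only None is excluded
        print('identity check assigns even for id=0')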
@@ -186,13 +186,14 @@ def test_recent_events_condenser():
     assert result == events

     # If the max_events are smaller than the number of events, only keep the last few.
-    max_events = 2
+    max_events = 3
     condenser = RecentEventsCondenser(max_events=max_events)
     result = condenser.condensed_history(mock_state)

     assert len(result) == max_events
-    assert result[0]._message == 'Event 4'
-    assert result[1]._message == 'Event 5'
+    assert result[0]._message == 'Event 1'  # kept from keep_first
+    assert result[1]._message == 'Event 4'  # kept from max_events
+    assert result[2]._message == 'Event 5'  # kept from max_events

     # If the keep_first flag is set, the first event will always be present.
     keep_first = 1
@@ -211,9 +212,9 @@ def test_recent_events_condenser():
     result = condenser.condensed_history(mock_state)

     assert len(result) == max_events
-    assert result[0]._message == 'Event 1'
-    assert result[1]._message == 'Event 2'
-    assert result[2]._message == 'Event 5'
+    assert result[0]._message == 'Event 1'  # kept from keep_first
+    assert result[1]._message == 'Event 2'  # kept from keep_first
+    assert result[2]._message == 'Event 5'  # kept from max_events


 def test_llm_summarization_condenser_from_config():
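For reference, a rough sketch of the keep_first/max_events behavior the assertions above rely on. It assumes RecentEventsCondenser keeps a keep_first-sized head plus the most recent events up to max_events in total; the helper below is illustrative only, not the library's implementation:

    def condense(events, max_events, keep_first):
        # Illustrative only: kept head plus recent tail (assumed semantics).
        if len(events) <= max_events:
            return list(events)
        head = events[:keep_first]
        tail = events[len(events) - (max_events - keep_first):]
        return head + tail

    events = [f'Event {i}' for i in range(1, 6)]  # Event 1 .. Event 5
    assert condense(events, max_events=3, keep_first=1) == ['Event 1', 'Event 4', 'Event 5']
    assert condense(events, max_events=3, keep_first=2) == ['Event 1', 'Event 2', 'Event 5']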
@@ -539,7 +540,7 @@ def test_llm_attention_condenser_forgets_when_larger_than_max_size(
 ):
     """Test that the LLMAttentionCondenser forgets events when the context grows too large."""
     max_size = 2
-    condenser = LLMAttentionCondenser(max_size=max_size, llm=mock_llm)
+    condenser = LLMAttentionCondenser(max_size=max_size, keep_first=0, llm=mock_llm)

     for i in range(max_size * 10):
         event = create_test_event(f'Event {i}', id=i)
@@ -560,7 +561,7 @@ def test_llm_attention_condenser_forgets_when_larger_than_max_size(
 def test_llm_attention_condenser_handles_events_outside_history(mock_llm, mock_state):
     """Test that the LLMAttentionCondenser handles event IDs that aren't from the event history."""
     max_size = 2
-    condenser = LLMAttentionCondenser(max_size=max_size, llm=mock_llm)
+    condenser = LLMAttentionCondenser(max_size=max_size, keep_first=0, llm=mock_llm)

     for i in range(max_size * 10):
         event = create_test_event(f'Event {i}', id=i)
@@ -580,7 +581,7 @@ def test_llm_attention_condenser_handles_events_outside_history(mock_llm, mock_s
 def test_llm_attention_condenser_handles_too_many_events(mock_llm, mock_state):
     """Test that the LLMAttentionCondenser handles when the response contains too many event IDs."""
     max_size = 2
-    condenser = LLMAttentionCondenser(max_size=max_size, llm=mock_llm)
+    condenser = LLMAttentionCondenser(max_size=max_size, keep_first=0, llm=mock_llm)

     for i in range(max_size * 10):
         event = create_test_event(f'Event {i}', id=i)
@@ -600,7 +601,9 @@ def test_llm_attention_condenser_handles_too_many_events(mock_llm, mock_state):
 def test_llm_attention_condenser_handles_too_few_events(mock_llm, mock_state):
     """Test that the LLMAttentionCondenser handles when the response contains too few event IDs."""
     max_size = 2
-    condenser = LLMAttentionCondenser(max_size=max_size, llm=mock_llm)
+    # Developer note: We must specify keep_first=0 because
+    # keep_first (1) >= max_size//2 (1) is invalid.
+    condenser = LLMAttentionCondenser(max_size=max_size, keep_first=0, llm=mock_llm)

     for i in range(max_size * 10):
         event = create_test_event(f'Event {i}', id=i)
@@ -614,3 +617,33 @@ def test_llm_attention_condenser_handles_too_few_events(mock_llm, mock_state):

         # The number of results should bounce back and forth between 1, 2, 1, 2, ...
         assert len(results) == (i % 2) + 1
+
+# Add a new test verifying that keep_first=1 works with max_size > 2
+
+
+def test_llm_attention_condenser_handles_keep_first_for_larger_max_size(
+    mock_llm, mock_state
+):
+    """Test that LLMAttentionCondenser works when keep_first=1 is allowed (must be less than half of max_size)."""
+    max_size = 4  # so keep_first=1 < (max_size // 2) = 2
+    condenser = LLMAttentionCondenser(max_size=max_size, keep_first=1, llm=mock_llm)
+
+    for i in range(max_size * 2):
+        # We append new events, then ensure some are pruned.
+        event = create_test_event(f'Event {i}', id=i)
+        mock_state.history.append(event)
+
+        mock_llm.set_mock_response_content(
+            ImportantEventSelection(ids=[]).model_dump_json()
+        )
+
+        results = condenser.condensed_history(mock_state)
+
+        # We expect that the first event is always kept, and the tail grows until max_size
+        if len(mock_state.history) <= max_size:
+            # No condensation needed yet
+            assert len(results) == len(mock_state.history)
+        else:
+            # The first event is kept, plus some from the tail
+            assert results[0].id == 0
+            assert len(results) <= max_size
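Finally, some context for the keep_first=0 arguments added throughout: the developer note above indicates that LLMAttentionCondenser rejects keep_first values of max_size // 2 or more. A hedged sketch of what such a check might look like; the standalone validate function and its error message are assumptions, not the actual constructor code:

    def validate(max_size, keep_first):
        # Assumed constraint from the developer note: the kept head must leave
        # room for at least half of max_size worth of recent events.
        if keep_first >= max_size // 2:
            raise ValueError(
                f'keep_first ({keep_first}) must be less than half of max_size ({max_size})'
            )

    validate(max_size=2, keep_first=0)    # fine: 0 < 2 // 2
    validate(max_size=4, keep_first=1)    # fine: 1 < 4 // 2, as in the new test
    # validate(max_size=2, keep_first=1)  # would raise: 1 >= 2 // 2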