@@ -129,7 +129,16 @@ impl Caches {
         let mut caches = caches.0.write();
 
         let caches_per_archetype = caches.entry(key.clone()).or_default();
-        caches_per_archetype.handle_pending_invalidation(&key);
+
+        let removed_bytes = caches_per_archetype.handle_pending_invalidation();
+        if removed_bytes > 0 {
+            re_log::trace!(
+                store_id = %key.store_id,
+                entity_path = %key.entity_path,
+                removed = removed_bytes,
+                "invalidated latest-at caches"
+            );
+        }
 
         let mut latest_at_per_archetype =
             caches_per_archetype.latest_at_per_archetype.write();
@@ -166,7 +175,16 @@ impl Caches {
         let mut caches = caches.0.write();
 
         let caches_per_archetype = caches.entry(key.clone()).or_default();
-        caches_per_archetype.handle_pending_invalidation(&key);
+
+        let removed_bytes = caches_per_archetype.handle_pending_invalidation();
+        if removed_bytes > 0 {
+            re_log::trace!(
+                store_id = %key.store_id,
+                entity_path = %key.entity_path,
+                removed = removed_bytes,
+                "invalidated range caches"
+            );
+        }
 
         let mut range_per_archetype = caches_per_archetype.range_per_archetype.write();
         let range_cache = range_per_archetype.entry(A::name()).or_default();
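Both query paths above now follow the same shape: drain any pending invalidation before reading through the cache, and emit a trace log only when bytes were actually reclaimed. A minimal, self-contained sketch of this deferred-invalidation pattern (toy types and names, not rerun's actual API): store events only record *what* became stale, and the expensive cache surgery is batched into the next query, so a frame's worth of events is handled in a single pass.

```rust
/// Toy stand-in for a per-entity cache (hypothetical; not rerun's actual types).
struct ToyCache {
    /// If set, everything cached at or after this time is stale.
    pending_invalidation_min_time: Option<i64>,
    /// `(data_time, value)` entries, kept sorted by time.
    entries: Vec<(i64, String)>,
}

impl ToyCache {
    /// Called for every incoming store event: cheap, pure bookkeeping.
    fn on_event(&mut self, data_time: i64) {
        let min = self.pending_invalidation_min_time.get_or_insert(data_time);
        *min = (*min).min(data_time);
    }

    /// Called once per query: drains the pending invalidation, then reads.
    fn latest_at(&mut self, query_time: i64) -> Option<&str> {
        if let Some(min_time) = self.pending_invalidation_min_time.take() {
            // Everything at or after the stale time gets dropped, once.
            self.entries.retain(|(t, _)| *t < min_time);
        }
        self.entries
            .iter()
            .rev()
            .find(|(t, _)| *t <= query_time)
            .map(|(_, v)| v.as_str())
    }
}
```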
@@ -281,7 +299,7 @@ impl StoreSubscriber for Caches {
         // TODO(cmc): This is horribly stupid and slow and can easily be made faster by adding
         // yet another layer of caching indirection.
         // But since this pretty much never happens in practice, let's not go there until we
-        // have metrics showing that we need to.
+        // have metrics that show we need to.
         {
             re_tracing::profile_scope!("timeless");
@@ -318,62 +336,63 @@ impl CachesPerArchetype {
     ///
     /// Invalidation is deferred to query time because it is far more efficient that way: the frame
     /// time effectively behaves as a natural micro-batching mechanism.
-    fn handle_pending_invalidation(&mut self, key: &CacheKey) {
+    ///
+    /// Returns the number of bytes removed.
+    fn handle_pending_invalidation(&mut self) -> u64 {
         let pending_timeless_invalidation = self.pending_timeless_invalidation;
         let pending_timeful_invalidation = self.pending_timeful_invalidation.is_some();
 
         if !pending_timeless_invalidation && !pending_timeful_invalidation {
-            return;
+            return 0;
         }
 
         re_tracing::profile_function!();
 
-        // TODO(cmc): range invalidation
+        let time_threshold = self.pending_timeful_invalidation.unwrap_or(TimeInt::MAX);
 
-        for latest_at_cache in self.latest_at_per_archetype.read().values() {
-            let mut latest_at_cache = latest_at_cache.write();
-
-            if pending_timeless_invalidation {
-                latest_at_cache.timeless = None;
-            }
+        self.pending_timeful_invalidation = None;
+        self.pending_timeless_invalidation = false;
 
-            let mut removed_bytes = 0u64;
-            if let Some(min_time) = self.pending_timeful_invalidation {
-                latest_at_cache
-                    .per_query_time
-                    .retain(|&query_time, _| query_time < min_time);
+        // Timeless being infinitely into the past, this effectively invalidates _everything_ with
+        // the current coarse-grained / archetype-level caching strategy.
+        if pending_timeless_invalidation {
+            re_tracing::profile_scope!("timeless");
+
+            let latest_at_removed_bytes = self
+                .latest_at_per_archetype
+                .read()
+                .values()
+                .map(|latest_at_cache| latest_at_cache.read().total_size_bytes())
+                .sum::<u64>();
+            let range_removed_bytes = self
+                .range_per_archetype
+                .read()
+                .values()
+                .map(|range_cache| range_cache.read().total_size_bytes())
+                .sum::<u64>();
+
+            *self = CachesPerArchetype::default();
+
+            return latest_at_removed_bytes + range_removed_bytes;
+        }
 
-                latest_at_cache.per_data_time.retain(|&data_time, bucket| {
-                    if data_time < min_time {
-                        return true;
-                    }
+        re_tracing::profile_scope!("timeful");
 
-                    // Only if that bucket is about to be dropped.
-                    if Arc::strong_count(bucket) == 1 {
-                        removed_bytes += bucket.read().total_size_bytes;
-                    }
+        let mut removed_bytes = 0u64;
 
-                    false
-                });
-            }
+        for latest_at_cache in self.latest_at_per_archetype.read().values() {
+            let mut latest_at_cache = latest_at_cache.write();
+            removed_bytes =
+                removed_bytes.saturating_add(latest_at_cache.truncate_at_time(time_threshold));
+        }
 
-            latest_at_cache.total_size_bytes = latest_at_cache
-                .total_size_bytes
-                .checked_sub(removed_bytes)
-                .unwrap_or_else(|| {
-                    re_log::debug!(
-                        store_id = %key.store_id,
-                        entity_path = %key.entity_path,
-                        current = latest_at_cache.total_size_bytes,
-                        removed = removed_bytes,
-                        "book keeping underflowed"
-                    );
-                    u64::MIN
-                });
+        for range_cache in self.range_per_archetype.read().values() {
+            let mut range_cache = range_cache.write();
+            removed_bytes =
+                removed_bytes.saturating_add(range_cache.truncate_at_time(time_threshold));
         }
 
-        self.pending_timeful_invalidation = None;
-        self.pending_timeless_invalidation = false;
+        removed_bytes
     }
 }
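The rewritten `handle_pending_invalidation` splits into two regimes. Timeless data conceptually lives infinitely far in the past, so a pending timeless invalidation cannot be expressed as a time threshold: the per-archetype state is measured and then dropped wholesale. A timeful invalidation only truncates entries at or after the threshold. A condensed model of that control flow, assuming a toy `(time, payload)` cache sorted by time (all names hypothetical):

```rust
enum PendingInvalidation {
    /// Invalidate everything (timeless data has no time to threshold on).
    Timeless,
    /// Invalidate everything at or after `min_time`.
    Timeful { min_time: i64 },
}

/// Returns the number of payload bytes removed, mirroring the real method's
/// contract of reporting reclaimed bytes to its caller for logging.
fn handle(pending: PendingInvalidation, cache: &mut Vec<(i64, Vec<u8>)>) -> u64 {
    match pending {
        PendingInvalidation::Timeless => {
            // Measure first so the caller can log it, then wipe wholesale.
            let removed = cache.iter().map(|(_, v)| v.len() as u64).sum();
            cache.clear();
            removed
        }
        PendingInvalidation::Timeful { min_time } => {
            // Keep strictly-earlier entries, drop (and measure) the rest.
            let keep = cache.partition_point(|(t, _)| *t < min_time);
            cache.drain(keep..).map(|(_, v)| v.len() as u64).sum()
        }
    }
}
```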
@@ -558,6 +577,64 @@ impl CacheBucket {
             .and_then(|data| data.as_any().downcast_ref::<FlatVecDeque<Option<C>>>())?;
         Some(data.range(entry_range))
     }
+
+    /// Removes everything from the bucket that corresponds to a time equal or greater than the
+    /// specified `threshold`.
+    ///
+    /// Returns the number of bytes removed.
+    #[inline]
+    pub fn truncate_at_time(&mut self, threshold: TimeInt) -> u64 {
+        let Self {
+            data_times,
+            pov_instance_keys,
+            components,
+            total_size_bytes,
+        } = self;
+
+        let mut removed_bytes = 0u64;
+
+        let threshold_idx = data_times.partition_point(|(data_time, _)| data_time < &threshold);
+
+        {
+            let total_size_bytes_before = data_times.total_size_bytes();
+            data_times.truncate(threshold_idx);
+            removed_bytes += total_size_bytes_before - data_times.total_size_bytes();
+        }
+
+        {
+            let total_size_bytes_before = pov_instance_keys.total_size_bytes();
+            pov_instance_keys.truncate(threshold_idx);
+            removed_bytes += total_size_bytes_before - pov_instance_keys.total_size_bytes();
+        }
+
+        for data in components.values_mut() {
+            let total_size_bytes_before = data.dyn_total_size_bytes();
+            data.dyn_truncate(threshold_idx);
+            removed_bytes += total_size_bytes_before - data.dyn_total_size_bytes();
+        }
+
+        debug_assert!({
+            let expected_num_entries = data_times.len();
+            data_times.len() == expected_num_entries
+                && pov_instance_keys.num_entries() == expected_num_entries
+                && components
+                    .values()
+                    .all(|data| data.dyn_num_entries() == expected_num_entries)
+        });
+
+        *total_size_bytes = total_size_bytes
+            .checked_sub(removed_bytes)
+            .unwrap_or_else(|| {
+                re_log::debug!(
+                    current = *total_size_bytes,
+                    removed = removed_bytes,
+                    "book keeping underflowed"
+                );
+                u64::MIN
+            });
+
+        removed_bytes
+    }
 }
 
 macro_rules! impl_insert {
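`truncate_at_time` leans on the fact that `data_times` is sorted: `partition_point` returns the index of the first entry whose time is at or after `threshold`, which is exactly the length to truncate each column to. A small usage sketch of that call on toy data (plain integers standing in for `TimeInt` and `RowId`):

```rust
fn main() {
    // (data_time, row_id) pairs, sorted by time, as in a `CacheBucket`.
    let data_times: Vec<(i64, u32)> = vec![(10, 0), (20, 1), (20, 2), (30, 3)];
    let threshold = 20;

    // First index whose time is >= threshold == the bucket's new length.
    let threshold_idx = data_times.partition_point(|(data_time, _)| *data_time < threshold);
    assert_eq!(threshold_idx, 1);

    // Entries at t == 20 are dropped too: invalidation is inclusive of the
    // threshold ("equal or greater"), since new data at that time supersedes them.
    let mut truncated = data_times.clone();
    truncated.truncate(threshold_idx);
    assert_eq!(truncated, vec![(10, 0)]);
}
```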
@@ -591,7 +668,7 @@ macro_rules! impl_insert {
 
         {
             // The `FlatVecDeque` will have to collect the data one way or another: do it ourselves
-            // instead, that way we can efficiently computes its size while we're at it.
+            // instead, that way we can efficiently compute its size while we're at it.
            let added: FlatVecDeque<InstanceKey> = arch_view
                .iter_instance_keys()
                .collect::<VecDeque<InstanceKey>>()
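The comment being fixed in this last hunk is about avoiding a second pass: the incoming component data has to be collected into an owned container regardless, so collecting it first lets its size be measured before it is moved into the cache, keeping the bucket's byte bookkeeping a simple addition. A rough sketch of that idea with a plain `VecDeque` and a hand-rolled size measure (not rerun's actual `SizeBytes` machinery):

```rust
use std::collections::VecDeque;

fn main() {
    let incoming = ["a".to_owned(), "bb".to_owned(), "ccc".to_owned()];

    // Collect once, up front: the cache needs owned data anyway.
    let added: VecDeque<String> = incoming.into_iter().collect();

    // Measure while we still hold the freshly collected data…
    let added_size_bytes: u64 = added.iter().map(|s| s.len() as u64).sum();

    // …then move it into the cache and bump the bookkeeping in O(1).
    let mut cache: VecDeque<String> = VecDeque::new();
    let mut total_size_bytes = 0u64;
    cache.extend(added);
    total_size_bytes += added_size_bytes;

    assert_eq!(total_size_bytes, 6);
    assert_eq!(cache.len(), 3);
}
```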