@@ -29,17 +29,22 @@ class CleanerKwargs(TypedDict):
29
29
lock_validity_period : float
30
30
use_database : bool
31
31
depth : int
32
+ batch_size : int | None
33
+ batch_sleep : int
32
34
33
35
34
36
def _cache_cleaner () -> None :
35
37
use_database = strtobool (os .environ .get ("USE_DATABASE" , "1" ))
38
+ batch_size = os .getenv ("CACHE_BATCH_SIZE" )
36
39
cleaner_kwargs = CleanerKwargs (
37
40
maxsize = int (os .environ .get ("MAX_SIZE" , 1_000_000_000 )),
38
41
method = os .environ .get ("METHOD" , "LRU" ),
39
42
delete_unknown_files = not use_database ,
40
43
lock_validity_period = float (os .environ .get ("LOCK_VALIDITY_PERIOD" , 86400 )),
41
44
use_database = use_database ,
42
45
depth = int (os .getenv ("CACHE_DEPTH" , 2 )),
46
+ batch_size = batch_size if batch_size is None else int (batch_size ),
47
+ batch_sleep = int (os .getenv ("CACHE_BATCH_SLEEP" , 0 )),
43
48
)
44
49
for cache_files_urlpath in utils .parse_data_volumes_config ():
45
50
cacholote .config .set (cache_files_urlpath = cache_files_urlpath )
@@ -77,6 +82,12 @@ def _expire_cache_entries(
77
82
delete : Annotated [
78
83
bool , Option ("--delete" , help = "Delete entries to expire" )
79
84
] = False ,
85
+ batch_size : Annotated [
86
+ int | None , Option (help = "Group cache entries to expire into batches" )
87
+ ] = None ,
88
+ batch_sleep : Annotated [
89
+ int , Option (help = "Sleep duration after processing each batch" )
90
+ ] = 0 ,
80
91
) -> int :
81
92
"""Expire cache entries."""
82
93
if (all_collections and collection_id ) or not (all_collections or collection_id ):
@@ -89,6 +100,8 @@ def _expire_cache_entries(
89
100
before = _add_tzinfo (before ),
90
101
after = _add_tzinfo (after ),
91
102
delete = delete ,
103
+ batch_size = batch_size ,
104
+ batch_sleep = batch_sleep ,
92
105
)
93
106
typer .echo (f"Number of entries expired: { count } " )
94
107
return count
0 commit comments