Skip to content

Commit 4757af2

Browse files
Add a bound to the inference tips cache
Small bounds still yield approximately equal numbers of cache hits and misses. Further work could determine whether storing only the most recent result is optimal.
1 parent c807c03 commit 4757af2

File tree

1 file changed

+9
-3
lines changed

1 file changed

+9
-3
lines changed

astroid/inference_tip.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66

77
from __future__ import annotations
88

9+
from collections import OrderedDict
910
from collections.abc import Generator
1011
from typing import Any, TypeVar
1112

@@ -18,9 +19,9 @@
1819
TransformFn,
1920
)
2021

21-
_cache: dict[
22+
_cache: OrderedDict[
2223
tuple[InferFn[Any], NodeNG, InferenceContext | None], list[InferenceResult]
23-
] = {}
24+
] = OrderedDict()
2425

2526
_CURRENTLY_INFERRING: set[tuple[InferFn[Any], NodeNG]] = set()
2627

@@ -61,14 +62,19 @@ def inner(
6162
_CURRENTLY_INFERRING.add(partial_cache_key)
6263
try:
6364
# May raise UseInferenceDefault
64-
result = _cache[func, node, context] = list(func(node, context, **kwargs))
65+
result = _cache[func, node, context] = list(
66+
func(node, context, **kwargs)
67+
)
6568
finally:
6669
# Remove recursion guard.
6770
try:
6871
_CURRENTLY_INFERRING.remove(partial_cache_key)
6972
except KeyError:
7073
pass # Recursion may beat us to the punch.
7174

75+
if len(_cache) > 64:
76+
_cache.popitem(last=False)
77+
7278
yield from result
7379

7480
return inner

0 commit comments

Comments
 (0)