Skip to content
This repository was archived by the owner on Apr 26, 2024. It is now read-only.

Commit c9dffd5

Browse files
Fizzadar and David Robertson authored
Remove unused @lru_cache decorator (#13595)
* Remove unused `@lru_cache` decorator. Spotted this while working on something else. Co-authored-by: David Robertson <[email protected]>
1 parent d125919 commit c9dffd5

File tree

3 files changed

+5
-140
lines changed

3 files changed

+5
-140
lines changed

changelog.d/13595.misc

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1 @@
1+
Remove unused `@lru_cache` decorator.

synapse/util/caches/descriptors.py

Lines changed: 0 additions & 104 deletions
Original file line number | Diff line number | Diff line change
@@ -12,7 +12,6 @@
1212
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1313
# See the License for the specific language governing permissions and
1414
# limitations under the License.
15-
import enum
1615
import functools
1716
import inspect
1817
import logging
@@ -146,109 +145,6 @@ def __init__(
146145
)
147146

148147

149-
class _LruCachedFunction(Generic[F]):
150-
cache: LruCache[CacheKey, Any]
151-
__call__: F
152-
153-
154-
def lru_cache(
155-
*, max_entries: int = 1000, cache_context: bool = False
156-
) -> Callable[[F], _LruCachedFunction[F]]:
157-
"""A method decorator that applies a memoizing cache around the function.
158-
159-
This is more-or-less a drop-in equivalent to functools.lru_cache, although note
160-
that the signature is slightly different.
161-
162-
The main differences with functools.lru_cache are:
163-
(a) the size of the cache can be controlled via the cache_factor mechanism
164-
(b) the wrapped function can request a "cache_context" which provides a
165-
callback mechanism to indicate that the result is no longer valid
166-
(c) prometheus metrics are exposed automatically.
167-
168-
The function should take zero or more arguments, which are used as the key for the
169-
cache. Single-argument functions use that argument as the cache key; otherwise the
170-
arguments are built into a tuple.
171-
172-
Cached functions can be "chained" (i.e. a cached function can call other cached
173-
functions and get appropriately invalidated when they called caches are
174-
invalidated) by adding a special "cache_context" argument to the function
175-
and passing that as a kwarg to all caches called. For example:
176-
177-
@lru_cache(cache_context=True)
178-
def foo(self, key, cache_context):
179-
r1 = self.bar1(key, on_invalidate=cache_context.invalidate)
180-
r2 = self.bar2(key, on_invalidate=cache_context.invalidate)
181-
return r1 + r2
182-
183-
The wrapped function also has a 'cache' property which offers direct access to the
184-
underlying LruCache.
185-
"""
186-
187-
def func(orig: F) -> _LruCachedFunction[F]:
188-
desc = LruCacheDescriptor(
189-
orig,
190-
max_entries=max_entries,
191-
cache_context=cache_context,
192-
)
193-
return cast(_LruCachedFunction[F], desc)
194-
195-
return func
196-
197-
198-
class LruCacheDescriptor(_CacheDescriptorBase):
199-
"""Helper for @lru_cache"""
200-
201-
class _Sentinel(enum.Enum):
202-
sentinel = object()
203-
204-
def __init__(
205-
self,
206-
orig: Callable[..., Any],
207-
max_entries: int = 1000,
208-
cache_context: bool = False,
209-
):
210-
super().__init__(
211-
orig, num_args=None, uncached_args=None, cache_context=cache_context
212-
)
213-
self.max_entries = max_entries
214-
215-
def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]:
216-
cache: LruCache[CacheKey, Any] = LruCache(
217-
cache_name=self.name,
218-
max_size=self.max_entries,
219-
)
220-
221-
get_cache_key = self.cache_key_builder
222-
sentinel = LruCacheDescriptor._Sentinel.sentinel
223-
224-
@functools.wraps(self.orig)
225-
def _wrapped(*args: Any, **kwargs: Any) -> Any:
226-
invalidate_callback = kwargs.pop("on_invalidate", None)
227-
callbacks = (invalidate_callback,) if invalidate_callback else ()
228-
229-
cache_key = get_cache_key(args, kwargs)
230-
231-
ret = cache.get(cache_key, default=sentinel, callbacks=callbacks)
232-
if ret != sentinel:
233-
return ret
234-
235-
# Add our own `cache_context` to argument list if the wrapped function
236-
# has asked for one
237-
if self.add_cache_context:
238-
kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key)
239-
240-
ret2 = self.orig(obj, *args, **kwargs)
241-
cache.set(cache_key, ret2, callbacks=callbacks)
242-
243-
return ret2
244-
245-
wrapped = cast(CachedFunction, _wrapped)
246-
wrapped.cache = cache
247-
obj.__dict__[self.name] = wrapped
248-
249-
return wrapped
250-
251-
252148
class DeferredCacheDescriptor(_CacheDescriptorBase):
253149
"""A method decorator that applies a memoizing cache around the function.
254150

tests/util/caches/test_descriptors.py

Lines changed: 4 additions & 36 deletions
Original file line number | Diff line number | Diff line change
@@ -28,46 +28,14 @@
2828
make_deferred_yieldable,
2929
)
3030
from synapse.util.caches import descriptors
31-
from synapse.util.caches.descriptors import cached, cachedList, lru_cache
31+
from synapse.util.caches.descriptors import cached, cachedList
3232

3333
from tests import unittest
3434
from tests.test_utils import get_awaitable_result
3535

3636
logger = logging.getLogger(__name__)
3737

3838

39-
class LruCacheDecoratorTestCase(unittest.TestCase):
40-
def test_base(self):
41-
class Cls:
42-
def __init__(self):
43-
self.mock = mock.Mock()
44-
45-
@lru_cache()
46-
def fn(self, arg1, arg2):
47-
return self.mock(arg1, arg2)
48-
49-
obj = Cls()
50-
obj.mock.return_value = "fish"
51-
r = obj.fn(1, 2)
52-
self.assertEqual(r, "fish")
53-
obj.mock.assert_called_once_with(1, 2)
54-
obj.mock.reset_mock()
55-
56-
# a call with different params should call the mock again
57-
obj.mock.return_value = "chips"
58-
r = obj.fn(1, 3)
59-
self.assertEqual(r, "chips")
60-
obj.mock.assert_called_once_with(1, 3)
61-
obj.mock.reset_mock()
62-
63-
# the two values should now be cached
64-
r = obj.fn(1, 2)
65-
self.assertEqual(r, "fish")
66-
r = obj.fn(1, 3)
67-
self.assertEqual(r, "chips")
68-
obj.mock.assert_not_called()
69-
70-
7139
def run_on_reactor():
7240
d = defer.Deferred()
7341
reactor.callLater(0, d.callback, 0)
@@ -478,10 +446,10 @@ async def func1(self, key, cache_context):
478446

479447
@cached(cache_context=True)
480448
async def func2(self, key, cache_context):
481-
return self.func3(key, on_invalidate=cache_context.invalidate)
449+
return await self.func3(key, on_invalidate=cache_context.invalidate)
482450

483-
@lru_cache(cache_context=True)
484-
def func3(self, key, cache_context):
451+
@cached(cache_context=True)
452+
async def func3(self, key, cache_context):
485453
self.invalidate = cache_context.invalidate
486454
return 42
487455

0 commit comments

Comments
 (0)