@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import enum
 import functools
 import inspect
 import logging
@@ -146,109 +145,6 @@ def __init__(
         )
 
 
-class _LruCachedFunction(Generic[F]):
-    cache: LruCache[CacheKey, Any]
-    __call__: F
-
-
-def lru_cache(
-    *, max_entries: int = 1000, cache_context: bool = False
-) -> Callable[[F], _LruCachedFunction[F]]:
-    """A method decorator that applies a memoizing cache around the function.
-
-    This is more-or-less a drop-in equivalent to functools.lru_cache, although note
-    that the signature is slightly different.
-
-    The main differences with functools.lru_cache are:
-        (a) the size of the cache can be controlled via the cache_factor mechanism
-        (b) the wrapped function can request a "cache_context" which provides a
-            callback mechanism to indicate that the result is no longer valid
-        (c) prometheus metrics are exposed automatically.
-
-    The function should take zero or more arguments, which are used as the key for the
-    cache. Single-argument functions use that argument as the cache key; otherwise the
-    arguments are built into a tuple.
-
-    Cached functions can be "chained" (i.e. a cached function can call other cached
-    functions and get appropriately invalidated when they called caches are
-    invalidated) by adding a special "cache_context" argument to the function
-    and passing that as a kwarg to all caches called. For example:
-
-        @lru_cache(cache_context=True)
-        def foo(self, key, cache_context):
-            r1 = self.bar1(key, on_invalidate=cache_context.invalidate)
-            r2 = self.bar2(key, on_invalidate=cache_context.invalidate)
-            return r1 + r2
-
-    The wrapped function also has a 'cache' property which offers direct access to the
-    underlying LruCache.
-    """
-
-    def func(orig: F) -> _LruCachedFunction[F]:
-        desc = LruCacheDescriptor(
-            orig,
-            max_entries=max_entries,
-            cache_context=cache_context,
-        )
-        return cast(_LruCachedFunction[F], desc)
-
-    return func
-
-
-class LruCacheDescriptor(_CacheDescriptorBase):
-    """Helper for @lru_cache"""
-
-    class _Sentinel(enum.Enum):
-        sentinel = object()
-
-    def __init__(
-        self,
-        orig: Callable[..., Any],
-        max_entries: int = 1000,
-        cache_context: bool = False,
-    ):
-        super().__init__(
-            orig, num_args=None, uncached_args=None, cache_context=cache_context
-        )
-        self.max_entries = max_entries
-
-    def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]:
-        cache: LruCache[CacheKey, Any] = LruCache(
-            cache_name=self.name,
-            max_size=self.max_entries,
-        )
-
-        get_cache_key = self.cache_key_builder
-        sentinel = LruCacheDescriptor._Sentinel.sentinel
-
-        @functools.wraps(self.orig)
-        def _wrapped(*args: Any, **kwargs: Any) -> Any:
-            invalidate_callback = kwargs.pop("on_invalidate", None)
-            callbacks = (invalidate_callback,) if invalidate_callback else ()
-
-            cache_key = get_cache_key(args, kwargs)
-
-            ret = cache.get(cache_key, default=sentinel, callbacks=callbacks)
-            if ret != sentinel:
-                return ret
-
-            # Add our own `cache_context` to argument list if the wrapped function
-            # has asked for one
-            if self.add_cache_context:
-                kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key)
-
-            ret2 = self.orig(obj, *args, **kwargs)
-            cache.set(cache_key, ret2, callbacks=callbacks)
-
-            return ret2
-
-        wrapped = cast(CachedFunction, _wrapped)
-        wrapped.cache = cache
-        obj.__dict__[self.name] = wrapped
-
-        return wrapped
-
-
 class DeferredCacheDescriptor(_CacheDescriptorBase):
     """A method decorator that applies a memoizing cache around the function.
 
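For context, the removed decorator implemented a per-instance, per-method LRU cache via the descriptor protocol, with an `on_invalidate` callback hook so caches could be chained and invalidated together. Below is a minimal, self-contained sketch of that pattern, not Synapse's actual implementation: `SimpleLruCache`, `simple_lru_cache`, and `Widget` are hypothetical names, and details such as Prometheus metrics, `cache_factor` sizing, and the full `cache_context` plumbing are deliberately omitted.

```python
# Hypothetical sketch of a descriptor-based per-instance LRU cache.
# Not Synapse code; names and signatures here are illustrative only.
import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional


class SimpleLruCache:
    """A tiny LRU cache that runs per-key callbacks when an entry is dropped."""

    def __init__(self, max_size: int = 1000) -> None:
        self.max_size = max_size
        self._data: "OrderedDict[Any, Any]" = OrderedDict()
        self._callbacks: Dict[Any, List[Callable[[], None]]] = {}

    def get(self, key: Any, default: Any = None,
            callback: Optional[Callable[[], None]] = None) -> Any:
        if key in self._data:
            self._data.move_to_end(key)
            if callback:
                self._callbacks.setdefault(key, []).append(callback)
            return self._data[key]
        return default

    def set(self, key: Any, value: Any,
            callback: Optional[Callable[[], None]] = None) -> None:
        self._data[key] = value
        self._data.move_to_end(key)
        if callback:
            self._callbacks.setdefault(key, []).append(callback)
        if len(self._data) > self.max_size:
            evicted, _ = self._data.popitem(last=False)  # drop least-recently used
            self._run_callbacks(evicted)

    def invalidate(self, key: Any) -> None:
        self._data.pop(key, None)
        self._run_callbacks(key)

    def _run_callbacks(self, key: Any) -> None:
        for cb in self._callbacks.pop(key, []):
            cb()


class simple_lru_cache:
    """Decorator/descriptor: each instance gets its own cache, built lazily in __get__."""

    def __init__(self, orig: Callable[..., Any], max_entries: int = 1000) -> None:
        self.orig = orig
        self.max_entries = max_entries
        self.name = orig.__name__

    def __get__(self, obj: Optional[Any], owner: Optional[type] = None) -> Any:
        if obj is None:
            return self  # accessed on the class rather than an instance
        cache = SimpleLruCache(max_size=self.max_entries)
        sentinel = object()  # distinguishes "missing" from a cached None

        @functools.wraps(self.orig)
        def _wrapped(*args: Any,
                     on_invalidate: Optional[Callable[[], None]] = None) -> Any:
            # Single-argument calls use that argument as the key, otherwise a tuple.
            key = args[0] if len(args) == 1 else args
            ret = cache.get(key, default=sentinel, callback=on_invalidate)
            if ret is not sentinel:
                return ret
            ret = self.orig(obj, *args)
            cache.set(key, ret, callback=on_invalidate)
            return ret

        _wrapped.cache = cache  # expose the cache, mirroring the removed "cache" property
        # Stash the bound wrapper on the instance so later lookups skip __get__.
        obj.__dict__[self.name] = _wrapped
        return _wrapped


class Widget:
    @simple_lru_cache
    def double(self, n: int) -> int:
        print("computing", n)
        return n * 2


w = Widget()
w.double(3)                   # computed and cached
w.double(3)                   # served from this instance's cache
w.double.cache.invalidate(3)  # drops the entry and fires any on_invalidate callbacks
w.double(3)                   # recomputed
```

Stashing the wrapper in `obj.__dict__` on first access is what gives each instance its own bounded cache; the removed `LruCacheDescriptor.__get__` did the same, and its `on_invalidate` callbacks are what let a chained cache (via `cache_context.invalidate`) drop its own entry when a dependency was invalidated.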