Import the async caches

Reid 'arrdem' McKenzie 2022-11-21 00:04:31 -07:00
parent ea7acd915c
commit b2b7363797
10 changed files with 325 additions and 0 deletions

View file

@@ -0,0 +1,3 @@
py_project(
name = "async_cache"
)

View file

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2020 Rajat Singh

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -0,0 +1,13 @@
# Async cache

An LRU and TTL cache for async functions in Python.

- `alru_cache` provides an LRU cache decorator with a configurable size.
- `attl_cache` provides a TTL+LRU cache decorator with a configurable size and expiration.

Neither cache proactively expires keys; maintenance occurs only when keys are requested from the cache.
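
A minimal usage sketch (the function names and values below are illustrative, not part of this commit):

```python
import asyncio

from async_cache import alru_cache, attl_cache

@alru_cache(maxsize=256)
async def slow_square(x: int) -> int:
    await asyncio.sleep(1)  # stand-in for an expensive call
    return x * x

@attl_cache(ttl=30, maxsize=256)
async def slow_cube(x: int) -> int:
    await asyncio.sleep(1)  # entries expire roughly 30 seconds after being stored
    return x ** 3

async def main():
    print(await slow_square(3))  # computed and cached
    print(await slow_square(3))  # served from the LRU cache
    print(await slow_cube(2))    # cached with a 30-second TTL

asyncio.run(main())
```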

## License

Derived from https://github.com/iamsinghrajat/async-cache, published under the MIT license.

View file

@@ -0,0 +1,4 @@
"""The interface to the package. Just re-exports implemented caches."""
from .lru import ALRU, alru_cache # noqa
from .ttl import ATTL, attl_cache # noqa

View file

@@ -0,0 +1,23 @@
from typing import Any
class KEY:
    """Hashable wrapper around a call's positional and keyword arguments, used as a cache key."""

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def __eq__(self, obj):
        # Two keys are considered equal whenever their hashes match.
        return hash(self) == hash(obj)

    def __hash__(self):
        def _hash(param: Any):
            # Recursively reduce containers to hashable tuples, falling back to
            # a string representation for objects and plain values.
            if isinstance(param, tuple):
                return tuple(map(_hash, param))
            if isinstance(param, dict):
                return tuple(map(_hash, param.items()))
            elif hasattr(param, "__dict__"):
                return str(vars(param))
            else:
                return str(param)

        return hash(_hash(self.args) + _hash(self.kwargs))

View file

@@ -0,0 +1,44 @@
from collections import OrderedDict
from .key import KEY
class LRU(OrderedDict):
def __init__(self, maxsize, *args, **kwargs):
self.maxsize = maxsize
super().__init__(*args, **kwargs)
    def __getitem__(self, key):
        # A read marks the entry as most recently used.
        value = super().__getitem__(key)
        self.move_to_end(key)
        return value

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Evict the least recently used entry once the cache grows past maxsize.
        if self.maxsize and len(self) > self.maxsize:
            oldest = next(iter(self))
            del self[oldest]
class ALRU(object):
def __init__(self, maxsize=128):
"""
        :param maxsize: Pass None for an unbounded cache.
"""
self.lru = LRU(maxsize=maxsize)
def __call__(self, func):
        async def wrapper(*args, **kwargs):
            key = KEY(args, kwargs)
            if key in self.lru:
                return self.lru[key]
            else:
                # Cache miss: await the wrapped coroutine and memoize its result.
                self.lru[key] = await func(*args, **kwargs)
                return self.lru[key]
wrapper.__name__ += func.__name__
return wrapper
alru_cache = ALRU
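
# Illustrative sketch (not part of the original file): maxsize=None disables
# eviction entirely, while an integer bound evicts the least recently used
# entry once the cache grows past it.
import asyncio

@alru_cache(maxsize=None)
async def lookup(name: str) -> str:
    await asyncio.sleep(0.1)  # stand-in for slow I/O
    return name.upper()

async def _demo():
    await lookup("a")  # computed, then cached
    await lookup("a")  # returned from the cache without re-running the sleep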

View file

@@ -0,0 +1,77 @@
from typing import Union, Optional
import datetime
from .key import KEY
from .lru import LRU
class ATTL:
class _TTL(LRU):
def __init__(self, ttl: Optional[Union[datetime.timedelta, int, float]], maxsize: int):
super().__init__(maxsize=maxsize)
if isinstance(ttl, datetime.timedelta):
self.ttl = ttl
elif isinstance(ttl, (int, float)):
self.ttl = datetime.timedelta(seconds=ttl)
elif ttl is None:
self.ttl = None
else:
                raise ValueError("TTL must be an int, float, timedelta, or None")
self.maxsize = maxsize
        def __contains__(self, key):
            # Expiration is handled lazily: an expired entry is removed here,
            # when it is next looked up, rather than by any background task.
            if key not in self.keys():
                return False
            else:
                key_expiration = super().__getitem__(key)[1]
                if key_expiration and key_expiration < datetime.datetime.now():
                    del self[key]
                    return False
                else:
                    return True

        def __getitem__(self, key):
            if key in self:
                value = super().__getitem__(key)[0]
                return value
            raise KeyError(key)

        def __setitem__(self, key, value):
            # Store the value alongside its expiration timestamp (or None).
            ttl_value = (
                datetime.datetime.now() + self.ttl
            ) if self.ttl else None
            super().__setitem__(key, (value, ttl_value))
def __init__(
self,
        ttl: Optional[Union[datetime.timedelta, int, float]] = datetime.timedelta(seconds=60),
maxsize: int = 1024,
skip_args: int = 0
):
"""
        :param ttl: Pass None for entries that never expire.
        :param maxsize: Pass None for an unbounded cache.
        :param skip_args: Pass 1 to skip the first argument of func (e.g. self) when building the cache key.
"""
self.ttl = self._TTL(ttl=ttl, maxsize=maxsize)
self.skip_args = skip_args
def __call__(self, func):
async def wrapper(*args, **kwargs):
key = KEY(args[self.skip_args:], kwargs)
if key in self.ttl:
val = self.ttl[key]
else:
self.ttl[key] = await func(*args, **kwargs)
val = self.ttl[key]
return val
wrapper.__name__ += func.__name__
return wrapper
attl_cache = ATTL
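
# Illustrative sketch (not part of the original file): skip_args=1 leaves the
# first positional argument (typically self) out of the cache key, so every
# instance shares cached results for the same remaining arguments.
import asyncio

class Client:
    @ATTL(ttl=30, maxsize=256, skip_args=1)
    async def fetch(self, resource: str) -> str:
        await asyncio.sleep(0.1)  # stand-in for a network call
        return resource.upper()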

View file

@@ -0,0 +1 @@
#!/usr/bin/env python3

View file

@@ -0,0 +1,82 @@
import asyncio
import time
from async_cache import ALRU, ATTL
@ALRU(maxsize=128)
async def func(wait: int):
await asyncio.sleep(wait)
class TestClassFunc:
@ALRU(maxsize=128)
async def obj_func(self, wait: int):
await asyncio.sleep(wait)
@staticmethod
@ATTL(maxsize=128, ttl=None, skip_args=1)
async def skip_arg_func(arg: int, wait: int):
await asyncio.sleep(wait)
@classmethod
@ALRU(maxsize=128)
async def class_func(cls, wait: int):
await asyncio.sleep(wait)
def test():
t1 = time.time()
asyncio.get_event_loop().run_until_complete(func(4))
t2 = time.time()
asyncio.get_event_loop().run_until_complete(func(4))
t3 = time.time()
t_first_exec = (t2 - t1) * 1000
t_second_exec = (t3 - t2) * 1000
print(t_first_exec)
print(t_second_exec)
assert t_first_exec > 4000
assert t_second_exec < 4000
def test_obj_fn():
t1 = time.time()
obj = TestClassFunc()
asyncio.get_event_loop().run_until_complete(obj.obj_func(4))
t2 = time.time()
asyncio.get_event_loop().run_until_complete(obj.obj_func(4))
t3 = time.time()
t_first_exec = (t2 - t1) * 1000
t_second_exec = (t3 - t2) * 1000
print(t_first_exec)
print(t_second_exec)
assert t_first_exec > 4000
assert t_second_exec < 4000
def test_class_fn():
t1 = time.time()
asyncio.get_event_loop().run_until_complete(TestClassFunc.class_func(4))
t2 = time.time()
asyncio.get_event_loop().run_until_complete(TestClassFunc.class_func(4))
t3 = time.time()
t_first_exec = (t2 - t1) * 1000
t_second_exec = (t3 - t2) * 1000
print(t_first_exec)
print(t_second_exec)
assert t_first_exec > 4000
assert t_second_exec < 4000
def test_skip_args():
t1 = time.time()
asyncio.get_event_loop().run_until_complete(TestClassFunc.skip_arg_func(5, 4))
t2 = time.time()
asyncio.get_event_loop().run_until_complete(TestClassFunc.skip_arg_func(6, 4))
t3 = time.time()
t_first_exec = (t2 - t1) * 1000
t_second_exec = (t3 - t2) * 1000
print(t_first_exec)
print(t_second_exec)
assert t_first_exec > 4000
assert t_second_exec < 4000

View file

@@ -0,0 +1,57 @@
import asyncio
import time
from async_cache import ATTL
@ATTL(ttl=60)
async def long_expiration_fn(wait: int):
await asyncio.sleep(wait)
return wait
@ATTL(ttl=5)
async def short_expiration_fn(wait: int):
await asyncio.sleep(wait)
return wait
@ATTL(ttl=3)
async def short_cleanup_fn(wait: int):
await asyncio.sleep(wait)
return wait
def test_cache_hit():
t1 = time.time()
asyncio.get_event_loop().run_until_complete(long_expiration_fn(4))
t2 = time.time()
asyncio.get_event_loop().run_until_complete(long_expiration_fn(4))
t3 = time.time()
t_first_exec = (t2 - t1) * 1000
t_second_exec = (t3 - t2) * 1000
print(t_first_exec)
print(t_second_exec)
assert t_first_exec > 4000
assert t_second_exec < 4000
def test_cache_expiration():
t1 = time.time()
asyncio.get_event_loop().run_until_complete(short_expiration_fn(1))
t2 = time.time()
asyncio.get_event_loop().run_until_complete(short_expiration_fn(1))
t3 = time.time()
time.sleep(5)
t4 = time.time()
asyncio.get_event_loop().run_until_complete(short_expiration_fn(1))
t5 = time.time()
t_first_exec = (t2 - t1) * 1000
t_second_exec = (t3 - t2) * 1000
t_third_exec = (t5 - t4) * 1000
print(t_first_exec)
print(t_second_exec)
print(t_third_exec)
assert t_first_exec > 1000
assert t_second_exec < 1000
assert t_third_exec > 1000