From b2b7363797b0d904a9e3644e4049b01cad384386 Mon Sep 17 00:00:00 2001
From: Reid 'arrdem' McKenzie
Date: Mon, 21 Nov 2022 00:04:31 -0700
Subject: [PATCH] Import the async caches

---
 projects/async_cache/BUILD                    |  3 +
 projects/async_cache/LICENSE                  | 21 +++++
 projects/async_cache/README.md                | 13 +++
 .../src/python/async_cache/__init__.py        |  4 +
 .../async_cache/src/python/async_cache/key.py | 23 ++++++
 .../async_cache/src/python/async_cache/lru.py | 44 ++++++++++
 .../async_cache/src/python/async_cache/ttl.py | 77 +++++++++++++++++
 projects/async_cache/test/python/__init__.py  |  1 +
 projects/async_cache/test/python/test_lru.py  | 82 +++++++++++++++++++
 projects/async_cache/test/python/test_ttl.py  | 57 +++++++++++++
 10 files changed, 325 insertions(+)
 create mode 100644 projects/async_cache/BUILD
 create mode 100644 projects/async_cache/LICENSE
 create mode 100644 projects/async_cache/README.md
 create mode 100644 projects/async_cache/src/python/async_cache/__init__.py
 create mode 100644 projects/async_cache/src/python/async_cache/key.py
 create mode 100644 projects/async_cache/src/python/async_cache/lru.py
 create mode 100644 projects/async_cache/src/python/async_cache/ttl.py
 create mode 100644 projects/async_cache/test/python/__init__.py
 create mode 100644 projects/async_cache/test/python/test_lru.py
 create mode 100644 projects/async_cache/test/python/test_ttl.py

diff --git a/projects/async_cache/BUILD b/projects/async_cache/BUILD
new file mode 100644
index 0000000..c5bbcba
--- /dev/null
+++ b/projects/async_cache/BUILD
@@ -0,0 +1,3 @@
+py_project(
+    name = "async_cache"
+)
diff --git a/projects/async_cache/LICENSE b/projects/async_cache/LICENSE
new file mode 100644
index 0000000..8b969f9
--- /dev/null
+++ b/projects/async_cache/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Rajat Singh
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/projects/async_cache/README.md b/projects/async_cache/README.md
new file mode 100644
index 0000000..e3425ad
--- /dev/null
+++ b/projects/async_cache/README.md
@@ -0,0 +1,13 @@
+# Async cache
+
+LRU and TTL caches for async functions in Python.
+
+- `alru_cache` provides an LRU cache decorator with a configurable size.
+- `attl_cache` provides a TTL+LRU cache decorator with a configurable size and expiration.
+
+Neither cache proactively expires keys.
+Maintenance happens only when keys are looked up in the cache.
+
+## License
+
+Derived from https://github.com/iamsinghrajat/async-cache, published under the MIT license.
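For orientation before the implementation diffs: the two decorators named in the README are defined in `lru.py` and `ttl.py` below and re-exported from the package root. A minimal usage sketch, assuming the package is importable as `async_cache` (as the bundled tests assume); the `fetch_user`/`fetch_rate` names and their sleeping bodies are illustrative, not part of the patch:

```python
import asyncio

from async_cache import alru_cache, attl_cache


@alru_cache(maxsize=256)
async def fetch_user(user_id: int) -> dict:
    # Stand-in for an expensive async lookup (RPC, DB query, ...).
    await asyncio.sleep(0.1)
    return {"id": user_id}


@attl_cache(ttl=30, maxsize=1024)
async def fetch_rate(currency: str) -> float:
    # Cached for up to 30 seconds; recomputed on the next lookup after that.
    await asyncio.sleep(0.1)
    return 1.0


async def main():
    await fetch_user(42)      # miss: runs the coroutine body
    await fetch_user(42)      # hit: returns the cached value without sleeping
    await fetch_rate("EUR")


asyncio.run(main())
```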
diff --git a/projects/async_cache/src/python/async_cache/__init__.py b/projects/async_cache/src/python/async_cache/__init__.py
new file mode 100644
index 0000000..91575b2
--- /dev/null
+++ b/projects/async_cache/src/python/async_cache/__init__.py
@@ -0,0 +1,4 @@
+"""The interface to the package. Just re-exports implemented caches."""
+
+from .lru import ALRU, alru_cache  # noqa
+from .ttl import ATTL, attl_cache  # noqa
diff --git a/projects/async_cache/src/python/async_cache/key.py b/projects/async_cache/src/python/async_cache/key.py
new file mode 100644
index 0000000..7d36518
--- /dev/null
+++ b/projects/async_cache/src/python/async_cache/key.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+
+class KEY:
+    def __init__(self, *args, **kwargs):
+        self.args = args
+        self.kwargs = kwargs
+
+    def __eq__(self, obj):
+        return hash(self) == hash(obj)
+
+    def __hash__(self):
+        def _hash(param: Any):
+            if isinstance(param, tuple):
+                return tuple(map(_hash, param))
+            if isinstance(param, dict):
+                return tuple(map(_hash, param.items()))
+            elif hasattr(param, "__dict__"):
+                return str(vars(param))
+            else:
+                return str(param)
+
+        return hash(_hash(self.args) + _hash(self.kwargs))
diff --git a/projects/async_cache/src/python/async_cache/lru.py b/projects/async_cache/src/python/async_cache/lru.py
new file mode 100644
index 0000000..785d76d
--- /dev/null
+++ b/projects/async_cache/src/python/async_cache/lru.py
@@ -0,0 +1,44 @@
+from collections import OrderedDict
+
+from .key import KEY
+
+
+class LRU(OrderedDict):
+    def __init__(self, maxsize, *args, **kwargs):
+        self.maxsize = maxsize
+        super().__init__(*args, **kwargs)
+
+    def __getitem__(self, key):
+        value = super().__getitem__(key)
+        self.move_to_end(key)
+        return value
+
+    def __setitem__(self, key, value):
+        super().__setitem__(key, value)
+        if self.maxsize and len(self) > self.maxsize:
+            oldest = next(iter(self))
+            del self[oldest]
+
+
+class ALRU(object):
+    def __init__(self, maxsize=128):
+        """
+        :param maxsize: Use maxsize as None for unlimited size cache
+        """
+        self.lru = LRU(maxsize=maxsize)
+
+    def __call__(self, func):
+        async def wrapper(*args, **kwargs):
+            key = KEY(args, kwargs)
+            if key in self.lru:
+                return self.lru[key]
+            else:
+                self.lru[key] = await func(*args, **kwargs)
+                return self.lru[key]
+
+        wrapper.__name__ += func.__name__
+
+        return wrapper
+
+
+alru_cache = ALRU
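Two details of the code above are easy to miss: `KEY` flattens positional and keyword arguments into a single hashable value, and `LRU.__getitem__` calls `move_to_end`, so eviction removes the least recently *used* entry rather than the oldest insertion. A small standalone sketch of that eviction behaviour, assuming `async_cache.lru` is importable; the string keys are illustrative:

```python
from async_cache.lru import LRU

cache = LRU(maxsize=2)
cache["a"] = 1
cache["b"] = 2
_ = cache["a"]   # reading "a" moves it to the most-recently-used end
cache["c"] = 3   # exceeds maxsize=2, so the least recently used key ("b") is evicted
assert list(cache) == ["a", "c"]
```

One consequence of the `KEY` hashing scheme worth noting: keyword arguments are hashed in dict insertion order, so calls passing the same keyword arguments in a different order will generally be treated as distinct cache entries.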
diff --git a/projects/async_cache/src/python/async_cache/ttl.py b/projects/async_cache/src/python/async_cache/ttl.py
new file mode 100644
index 0000000..c37c2e5
--- /dev/null
+++ b/projects/async_cache/src/python/async_cache/ttl.py
@@ -0,0 +1,77 @@
+from typing import Union, Optional
+import datetime
+
+from .key import KEY
+from .lru import LRU
+
+
+class ATTL:
+    class _TTL(LRU):
+        def __init__(self, ttl: Optional[Union[datetime.timedelta, int, float]], maxsize: int):
+            super().__init__(maxsize=maxsize)
+
+            if isinstance(ttl, datetime.timedelta):
+                self.ttl = ttl
+            elif isinstance(ttl, (int, float)):
+                self.ttl = datetime.timedelta(seconds=ttl)
+            elif ttl is None:
+                self.ttl = None
+            else:
+                raise ValueError("TTL must be int or timedelta")
+
+            self.maxsize = maxsize
+
+        def __contains__(self, key):
+            if key not in self.keys():
+                return False
+            else:
+                key_expiration = super().__getitem__(key)[1]
+                if key_expiration and key_expiration < datetime.datetime.now():
+                    del self[key]
+                    return False
+                else:
+                    return True
+
+        def __getitem__(self, key):
+            if key in self:
+                value = super().__getitem__(key)[0]
+                return value
+            raise KeyError
+
+        def __setitem__(self, key, value):
+            ttl_value = (
+                datetime.datetime.now() + self.ttl
+            ) if self.ttl else None
+            super().__setitem__(key, (value, ttl_value))
+
+    def __init__(
+        self,
+        ttl: Optional[Union[datetime.timedelta, int]] = datetime.timedelta(seconds=60),
+        maxsize: int = 1024,
+        skip_args: int = 0
+    ):
+        """
+        :param ttl: Use ttl as None for non expiring cache
+        :param maxsize: Use maxsize as None for unlimited size cache
+        :param skip_args: Use `1` to skip first arg of func in determining cache key
+        """
+        self.ttl = self._TTL(ttl=ttl, maxsize=maxsize)
+        self.skip_args = skip_args
+
+    def __call__(self, func):
+        async def wrapper(*args, **kwargs):
+            key = KEY(args[self.skip_args:], kwargs)
+            if key in self.ttl:
+                val = self.ttl[key]
+            else:
+                self.ttl[key] = await func(*args, **kwargs)
+                val = self.ttl[key]
+
+            return val
+
+        wrapper.__name__ += func.__name__
+
+        return wrapper
+
+
+attl_cache = ATTL
diff --git a/projects/async_cache/test/python/__init__.py b/projects/async_cache/test/python/__init__.py
new file mode 100644
index 0000000..e5a0d9b
--- /dev/null
+++ b/projects/async_cache/test/python/__init__.py
@@ -0,0 +1 @@
+#!/usr/bin/env python3
diff --git a/projects/async_cache/test/python/test_lru.py b/projects/async_cache/test/python/test_lru.py
new file mode 100644
index 0000000..86bdbdd
--- /dev/null
+++ b/projects/async_cache/test/python/test_lru.py
@@ -0,0 +1,82 @@
+import asyncio
+import time
+
+from async_cache import ALRU, ATTL
+
+
+@ALRU(maxsize=128)
+async def func(wait: int):
+    await asyncio.sleep(wait)
+
+
+class TestClassFunc:
+    @ALRU(maxsize=128)
+    async def obj_func(self, wait: int):
+        await asyncio.sleep(wait)
+
+    @staticmethod
+    @ATTL(maxsize=128, ttl=None, skip_args=1)
+    async def skip_arg_func(arg: int, wait: int):
+        await asyncio.sleep(wait)
+
+    @classmethod
+    @ALRU(maxsize=128)
+    async def class_func(cls, wait: int):
+        await asyncio.sleep(wait)
+
+
+def test():
+    t1 = time.time()
+    asyncio.get_event_loop().run_until_complete(func(4))
+    t2 = time.time()
+    asyncio.get_event_loop().run_until_complete(func(4))
+    t3 = time.time()
+    t_first_exec = (t2 - t1) * 1000
+    t_second_exec = (t3 - t2) * 1000
+    print(t_first_exec)
+    print(t_second_exec)
+    assert t_first_exec > 4000
+    assert t_second_exec < 4000
+
+
+def test_obj_fn():
+    t1 = time.time()
+    obj = TestClassFunc()
+    asyncio.get_event_loop().run_until_complete(obj.obj_func(4))
+    t2 = time.time()
+    asyncio.get_event_loop().run_until_complete(obj.obj_func(4))
+    t3 = time.time()
+    t_first_exec = (t2 - t1) * 1000
+    t_second_exec = (t3 - t2) * 1000
+    print(t_first_exec)
+    print(t_second_exec)
+    assert t_first_exec > 4000
+    assert t_second_exec < 4000
+
+
+def test_class_fn():
+    t1 = time.time()
+    asyncio.get_event_loop().run_until_complete(TestClassFunc.class_func(4))
+    t2 = time.time()
+    asyncio.get_event_loop().run_until_complete(TestClassFunc.class_func(4))
+    t3 = time.time()
+    t_first_exec = (t2 - t1) * 1000
+    t_second_exec = (t3 - t2) * 1000
+    print(t_first_exec)
+    print(t_second_exec)
+    assert t_first_exec > 4000
+    assert t_second_exec < 4000
+
+
+def test_skip_args():
+    t1 = time.time()
+    asyncio.get_event_loop().run_until_complete(TestClassFunc.skip_arg_func(5, 4))
+    t2 = time.time()
+    asyncio.get_event_loop().run_until_complete(TestClassFunc.skip_arg_func(6, 4))
+    t3 = time.time()
+    t_first_exec = (t2 - t1) * 1000
+    t_second_exec = (t3 - t2) * 1000
+    print(t_first_exec)
+    print(t_second_exec)
+    assert t_first_exec > 4000
+    assert t_second_exec < 4000
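The `_TTL` mapping above only checks an entry's deadline inside `__contains__`, which is what the README means by maintenance happening at lookup time: an expired entry is deleted on its next lookup and the wrapped coroutine runs again. A short end-to-end sketch of that lazy expiration, assuming the package is importable as `async_cache`; the `now_ish` helper and one-second TTL are illustrative only:

```python
import asyncio
import time

from async_cache import attl_cache

calls = 0


@attl_cache(ttl=1, maxsize=16)
async def now_ish() -> float:
    # Count real executions so cache hits and misses are observable.
    global calls
    calls += 1
    return time.time()


async def demo():
    a = await now_ish()
    b = await now_ish()       # within the 1s TTL: served from the cache
    assert a == b and calls == 1
    await asyncio.sleep(1.1)  # let the entry expire
    c = await now_ish()       # expired entry is dropped and recomputed
    assert c != a and calls == 2


asyncio.run(demo())
```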
diff --git a/projects/async_cache/test/python/test_ttl.py b/projects/async_cache/test/python/test_ttl.py
new file mode 100644
index 0000000..d333d78
--- /dev/null
+++ b/projects/async_cache/test/python/test_ttl.py
@@ -0,0 +1,57 @@
+import asyncio
+import time
+
+from async_cache import ATTL
+
+
+@ATTL(ttl=60)
+async def long_expiration_fn(wait: int):
+    await asyncio.sleep(wait)
+    return wait
+
+
+@ATTL(ttl=5)
+async def short_expiration_fn(wait: int):
+    await asyncio.sleep(wait)
+    return wait
+
+
+@ATTL(ttl=3)
+async def short_cleanup_fn(wait: int):
+    await asyncio.sleep(wait)
+    return wait
+
+
+def test_cache_hit():
+    t1 = time.time()
+    asyncio.get_event_loop().run_until_complete(long_expiration_fn(4))
+    t2 = time.time()
+    asyncio.get_event_loop().run_until_complete(long_expiration_fn(4))
+    t3 = time.time()
+    t_first_exec = (t2 - t1) * 1000
+    t_second_exec = (t3 - t2) * 1000
+    print(t_first_exec)
+    print(t_second_exec)
+    assert t_first_exec > 4000
+    assert t_second_exec < 4000
+
+
+def test_cache_expiration():
+    t1 = time.time()
+    asyncio.get_event_loop().run_until_complete(short_expiration_fn(1))
+    t2 = time.time()
+    asyncio.get_event_loop().run_until_complete(short_expiration_fn(1))
+    t3 = time.time()
+    time.sleep(5)
+    t4 = time.time()
+    asyncio.get_event_loop().run_until_complete(short_expiration_fn(1))
+    t5 = time.time()
+    t_first_exec = (t2 - t1) * 1000
+    t_second_exec = (t3 - t2) * 1000
+    t_third_exec = (t5 - t4) * 1000
+    print(t_first_exec)
+    print(t_second_exec)
+    print(t_third_exec)
+    assert t_first_exec > 1000
+    assert t_second_exec < 1000
+    assert t_third_exec > 1000
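The tests in this patch assert on wall-clock timing around multi-second sleeps, which keeps them dependency-free but slow. As a non-authoritative alternative, not part of the patch, the same hit/miss behaviour can be checked with an execution counter and no long sleeps; a sketch assuming a pytest-style runner:

```python
import asyncio

from async_cache import ALRU

executions = 0


@ALRU(maxsize=8)
async def tracked(x: int) -> int:
    # Count how many times the body actually runs.
    global executions
    executions += 1
    return x * 2


def test_hit_without_timing():
    assert asyncio.run(tracked(3)) == 6   # miss: executes the body
    assert asyncio.run(tracked(3)) == 6   # hit: the body is not re-executed
    assert asyncio.run(tracked(4)) == 8   # different argument: a new miss
    assert executions == 2
```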