1
Fork 0
mirror of https://github.com/RGBCube/GitHubWrapper synced 2025-05-20 07:55:09 +00:00

Update the cache so objects are reflected more easily

This commit is contained in:
NextChai 2022-04-30 01:54:17 -04:00
parent 8091515ee9
commit b551458c03
2 changed files with 51 additions and 72 deletions

View file

@ -2,91 +2,70 @@
from __future__ import annotations from __future__ import annotations
__all__ = ( from collections import deque
'UserCache', from collections.abc import MutableMapping
'RepoCache', from typing import Any, Deque, Tuple, TypeVar
'OrgCache',
__all__: Tuple[str, ...] = (
'ObjectCache',
) )
from collections import deque
from .objects import APIObject, User, Repository, Organization K = TypeVar('K')
V = TypeVar('V')
class _BaseCache(dict): class _BaseCache(MutableMapping[K, V]):
"""This is a rough implementation of an LRU Cache using a deque and a dict.""" """This is a rough implementation of an LRU Cache using a deque and a dict."""
_max_size: int
_lru_keys: deque
def __init__(self, max_size: int, *args):
self._max_size = max(min(max_size, 15), 0) # bounding max_size to 15 for now
self._lru_keys = deque(maxlen=self._max_size)
super().__init__(args)
def __getitem__(self, __k: str) -> APIObject: __slots__: Tuple[str, ...] = ('_max_size', '_lru_keys')
target = self._lru_keys.pop(self._lru_keys.index(__k))
def __init__(self, max_size: int, *args: Any) -> None:
self._max_size: int = max(min(max_size, 15), 0) # bounding max_size to 15 for now
self._lru_keys: Deque[K] = deque[K](maxlen=self._max_size)
super().__init__(*args)
def __getitem__(self, __k: K) -> V:
index = self._lru_keys.index(__k)
target = self._lru_keys[index]
del self._lru_keys[index]
self._lru_keys.appendleft(target) self._lru_keys.appendleft(target)
return super().__getitem__(__k) return super().__getitem__(__k)
def __setitem__(self, __k: str, __v: APIObject) -> None: def __setitem__(self, __k: K, __v: V) -> None:
if len(self) == self._max_size: if len(self) == self._max_size:
to_pop = self._lru_keys.pop(-1) self.__delitem__(self._lru_keys.pop())
del self[to_pop]
self._lru_keys.appendleft(__k) self._lru_keys.appendleft(__k)
return super().__setitem__(__k, __v) return super().__setitem__(__k, __v)
def update(self, *args, **kwargs) -> None: def update(self, **kwargs: Any) -> None:
for key, value in dict(*args, **kwargs).iteritems(): for key, value in dict(**kwargs).items():
self[key] = value key: K
value: V
self.__setitem__(key, value)
class ObjectCache(_BaseCache[K, V]):
    """A typed LRU cache for Github objects.

    This subclass exists purely to pin the generic key/value typehints for
    cached Github objects; all LRU behaviour is inherited from
    :class:`_BaseCache`, so nothing is overridden here. (The hand-copied
    ``__getitem__`` override this replaces never removed the key's old slot
    from the recency deque, so every access duplicated the key; the
    ``__setitem__``/``update`` overrides were byte-for-byte repeats of the
    base class.)
    """

View file

@ -8,12 +8,12 @@ __all__ = (
import asyncio import asyncio
import functools import functools
from typing import Union, List, Dict from typing import Any, Union, List, Dict
import aiohttp import aiohttp
from . import exceptions from . import exceptions
from .cache import RepoCache, UserCache from .cache import ObjectCache
from .http import http from .http import http
from .objects import Gist, Issue, Organization, Repository, User, File from .objects import Gist, Issue, Organization, Repository, User, File
@ -32,9 +32,9 @@ class GHClient:
): ):
"""The main client, used to start most use-cases.""" """The main client, used to start most use-cases."""
self._headers = custom_headers self._headers = custom_headers
bound = lambda hi, lo, value: max(min(value, hi), lo)
self._user_cache = UserCache(bound(50, 0, user_cache_size)) self._user_cache = ObjectCache[Any, User](user_cache_size)
self._repo_cache = RepoCache(bound(50, 0, repo_cache_size)) self._repo_cache = ObjectCache[Any, Repository](repo_cache_size)
if username and token: if username and token:
self.username = username self.username = username
self.token = token self.token = token