Reimplements Throttle as a standalone class with improved memory management.

This commit is contained in:
Johnny 2018-09-21 00:11:15 +00:00
parent bde8281c1e
commit 0f3e0fadf7
2 changed files with 138 additions and 0 deletions

View file

@ -26,6 +26,9 @@ except ImportError:
from django.test.runner import DiscoverRunner
from evennia.server.throttle import Throttle
from evennia.utils.test_resources import EvenniaTest
from .deprecations import check_errors
@ -77,3 +80,40 @@ class TestDeprecations(TestCase):
self.assertRaises(DeprecationWarning, check_errors, MockSettings(setting))
# test check for WEBSERVER_PORTS having correct value
self.assertRaises(DeprecationWarning, check_errors, MockSettings("WEBSERVER_PORTS", value=["not a tuple"]))
class ThrottleTest(EvenniaTest):
    """
    Class for testing the connection/IP throttle.
    """
    def test_throttle(self):
        """Trip the throttle, overflow its cache, and verify memory bounds."""
        ips = ('94.100.176.153', '45.56.148.77', '5.196.1.129')
        kwargs = {
            'maxlim': 5,
            'timeout': 5 * 60
        }
        for ip in ips:
            # Throttle should not be engaged by default
            self.assertFalse(Throttle.check(ip, **kwargs))
            # Pretend to fail a bunch of events; update() always returns False
            # (use range, not Python-2-only xrange; equivalent here on py2 too)
            for _ in range(5):
                self.assertFalse(Throttle.update(ip))
            # Next ones should be blocked
            self.assertTrue(Throttle.check(ip, **kwargs))
            # Overflow the per-IP deque well past its cap
            for _ in range(Throttle.cache_size * 2):
                self.assertFalse(Throttle.update(ip))
            # Should still be blocked
            self.assertTrue(Throttle.check(ip, **kwargs))
            # Number of stored timestamps should be limited by cache size
            self.assertEqual(Throttle.cache_size, len(Throttle.get(ip)))
        # There should only be (cache_size * num_ips) total in the Throttle cache
        cache = Throttle.get()
        self.assertEqual(sum(len(v) for v in cache.values()), Throttle.cache_size * len(ips))

View file

@ -0,0 +1,98 @@
from collections import defaultdict, deque
import time
# Module-level table of recent failures, keyed by IP string.
# Values are deques of failure timestamps (length-capped once recorded via
# Throttle.check's recording mode).
_LATEST_FAILURES = defaultdict(deque)


class Throttle(object):
    """
    Keeps a running count of failed actions per IP address.

    Available methods indicate whether or not the number of failures exceeds a
    particular threshold.

    This version of the throttle is usable by both the terminal server as well
    as the web server, imposes limits on memory consumption by using deques
    with length limits instead of open-ended lists, and removes sparse keys when
    no recent failures have been recorded.
    """
    error_msg = 'Too many failed attempts; you must wait a few minutes before trying again.'
    # Maximum number of failure timestamps retained per IP.
    cache_size = 20

    @classmethod
    def get(cls, ip=None, storage=_LATEST_FAILURES):
        """
        Retrieve recorded failures, either for one IP or the whole table.

        Args:
            ip (str, optional): IP address of requestor

        Returns:
            storage (dict): When no IP is provided, returns a dict of all
                current IPs being tracked and the timestamps of their recent
                failures.
            timestamps (deque): When an IP is provided, returns a deque of
                timestamps of recent failures only for that IP (an empty
                length-capped deque if the IP is untracked).
        """
        if ip:
            # .get() avoids creating a defaultdict entry for untracked IPs
            return storage.get(ip, deque(maxlen=cls.cache_size))
        return storage

    @classmethod
    def update(cls, ip):
        """
        Convenience function that appends a new failure event to the table.

        Args:
            ip (str): IP address of requestor

        Returns:
            throttled (False): Always returns False
        """
        # check() without maxlim/timeout runs in recording mode
        return cls.check(ip)

    @classmethod
    def check(cls, ip, maxlim=None, timeout=None, storage=_LATEST_FAILURES):
        """
        This will check the session's address against the
        _LATEST_FAILURES dictionary to check they haven't
        spammed too many fails recently.

        Args:
            ip (str): IP address of requestor
            maxlim (int): max number of attempts to allow
            timeout (int): number of timeout seconds after
                max number of tries has been reached.

        Returns:
            throttled (bool): True if throttling is active,
                False otherwise.

        Notes:
            If maxlim and/or timeout are set, the function will
            just do the comparison, not append a new datapoint.
        """
        now = time.time()
        ip = str(ip)
        if maxlim and timeout:
            # Checking mode. Use .get() rather than indexing: indexing a
            # defaultdict would silently create an empty, unbounded deque for
            # every IP merely *checked*, leaking exactly the sparse keys this
            # class is meant to avoid.
            latest_fails = storage.get(ip)
            if latest_fails and len(latest_fails) >= maxlim:
                # too many fails recently
                if now - latest_fails[-1] < timeout:
                    # too soon - timeout in play
                    return True
                # timeout has passed; drop the fail list to reclaim the key
                del storage[ip]
                return False
            return False
        else:
            # Recording mode: store the time of the latest fail. Replace any
            # legacy/unbounded deque with a length-capped one so memory stays
            # bounded per IP.
            if ip not in storage or not storage[ip].maxlen:
                storage[ip] = deque(maxlen=cls.cache_size)
            # reuse the timestamp computed above instead of calling time.time()
            # a second time
            storage[ip].append(now)
            return False