Fixed flush protection for instances; memory is now successfully made available on an idmapper flush.

This commit is contained in:
Griatch 2014-05-17 14:38:43 +02:00
parent 4b8ed234fd
commit 58af67bdf2
6 changed files with 47 additions and 35 deletions

View file

@ -14,6 +14,7 @@ See CmdHandler for practical examples on how to apply cmdsets
together to create interesting in-game effects.
"""
from weakref import WeakKeyDictionary
from django.utils.translation import ugettext as _
from src.utils.utils import inherits_from, is_iter
__all__ = ("CmdSet",)
@ -159,7 +160,7 @@ class CmdSet(object):
# initialize system
self.at_cmdset_creation()
self._contains_cache = {}
self._contains_cache = WeakKeyDictionary()#{}
# Priority-sensitive merge operations for cmdsets

View file

@ -686,6 +686,7 @@ class CmdServerLoad(MuxCommand):
if "mem" in self.switches:
caller.msg("Memory usage: RMEM: {w%g{n MB (%g%%), VMEM (res+swap+cache): {w%g{n MB." % (rmem, pmem, vmem))
return
if "flushmem" in self.switches:
caller.msg("Flushed object idmapper cache. Python garbage collector recovered memory from %i objects." % _idmapper.flush_cache())
return
@ -712,29 +713,26 @@ class CmdServerLoad(MuxCommand):
# because it lacks sys.getsizeof
# object cache size
cachedict = _idmapper.cache_size()
totcache = cachedict["_total"]
total_num, total_size, cachedict = _idmapper.cache_size()
sorted_cache = sorted([(key, tup[0], tup[1]) for key, tup in cachedict.items() if key !="_total" and tup[0] > 0],
key=lambda tup: tup[2], reverse=True)
memtable = prettytable.PrettyTable(["entity name",
"number",
"cache (MB)",
"idmapper %%"])
memtable.align = 'l'
for tup in sorted_cache:
memtable.add_row([tup[0],
"%i" % tup[1],
"%5.2f" % tup[2],
"%.2f" % (float(tup[2] / totcache[1]) * 100)])
"%.2f" % (float(tup[1] / 1.0*total_num) * 100)])
# get sizes of other caches
attr_cache_info, prop_cache_info = get_cache_sizes()
string += "\n{w Entity idmapper cache usage:{n %5.2f MB (%i items)\n%s" % (totcache[1], totcache[0], memtable)
#attr_cache_info, prop_cache_info = get_cache_sizes()
string += "\n{w Entity idmapper cache:{n %i items\n%s" % (total_num, memtable)
#string += "\n{w On-entity Attribute cache usage:{n %5.2f MB (%i attrs)" % (attr_cache_info[1], attr_cache_info[0])
#string += "\n{w On-entity Property cache usage:{n %5.2f MB (%i props)" % (prop_cache_info[1], prop_cache_info[0])
base_mem = vmem - totcache[1] - attr_cache_info[1] - prop_cache_info[1]
#base_mem = vmem - total_size - attr_cache_info[1] - prop_cache_info[1]
#string += "\n{w Base Server usage (virtmem-idmapper-attrcache-propcache):{n %5.2f MB" % base_mem
string += "\n{w Base Server usage (virtmem - cache):{n %5.2f MB" % base_mem
#string += "\n{w Base Server usage (virtmem - cache):{n %5.2f MB" % base_mem
caller.msg(string)

View file

@ -1088,7 +1088,7 @@ class Exit(Object):
if self.ndb.exit_reset or not self.cmdset.has_cmdset("_exitset", must_be_default=True):
# we are resetting, or no exit-cmdset was set. Create one dynamically.
self.cmdset.add_default(self.create_exit_cmdset(self.dbobj), permanent=False)
self.ndb.exit_reset = False
del self.ndb.exit_reset
# this and other hooks are what usually can be modified safely.

View file

@ -467,6 +467,7 @@ class NAttributeHandler(object):
def add(self, key, value):
    """
    Store `value` under `key` and shield the owning object from an
    idmapper cache flush, since it now holds non-persistent state.
    """
    self._store[key] = value
    # NOTE(review): removed a leftover debug print
    # ("set_recache_protection: ...") that was committed with the fix.
    self.obj.set_recache_protection()
def remove(self, key):

View file

def c_digs(client):
    """
    Build a @dig/teleport command creating a new room with two exits.

    Returns (command_string, description) for the test client to execute.
    """
    exitname1 = EXIT_TEMPLATE % client.counter()
    exitname2 = EXIT_TEMPLATE % client.counter()
    client.exits.extend([exitname1, exitname2])
    # NOTE(review): removed the stale pre-commit '@dig %s = ...' assignment
    # that was immediately overwritten (dead store); only the /tel form is
    # kept. `roomname` is not defined in this hunk -- presumably built from
    # a room template just above; confirm against the full file.
    cmd = '@dig/tel %s = %s, %s' % (roomname, exitname1, exitname2)
    return cmd, "digs ..."
def c_creates_obj(client):
@ -199,11 +199,17 @@ def c_moves(client):
# (0.1, c_help),
# (0.4, c_moves))
## "socializing heavy builder" definition
#ACTIONS = (c_login,
# c_logout,
# (0.1, c_socialize),
# (0.1, c_looks),
# (0.2, c_help),
# (0.1, c_creates_obj),
# (0.2, c_digs),
# (0.3, c_moves))
## "heavy digger memory tester" definition
# Weighted (probability, action) entries plus mandatory login/logout
# for the dummy-client runner.
# NOTE(review): this span is a diff render that interleaves the pre- and
# post-change tails of the tuple (two closing parens below); the
# post-change tuple presumably ends with the
# (0.1, c_creates_obj), (0.8, c_digs)) lines -- confirm against the repo.
ACTIONS = (c_login,
c_logout,
#(0.1, c_socialize),
(0.1, c_looks),
(0.2, c_help),
(0.2, c_creates_obj),
(0.2, c_digs),
(0.3, c_moves))
(0.1, c_creates_obj),
(0.8, c_digs))

View file

@ -177,6 +177,10 @@ class SharedMemoryModel(Model):
class Meta:
abstract = True
#def __init__(cls, *args, **kwargs):
# super(SharedMemoryModel, cls).__init__(*args, **kwargs)
# cls._idmapper_recache_protection = False
def _get_cache_key(cls, args, kwargs):
"""
This method is used by the caching subsystem to infer the PK value from the constructor arguments.
@ -238,11 +242,6 @@ class SharedMemoryModel(Model):
pass
_flush_cached_by_key = classmethod(_flush_cached_by_key)
def set_recache_protection(cls, mode=True):
"set if this instance should be allowed to be recached."
cls._idmapper_recache_protection = bool(mode)
set_recache_protection = classmethod(set_recache_protection)
def flush_cached_instance(cls, instance, force=True):
"""
Method to flush an instance from the cache. The instance will
@ -254,6 +253,12 @@ class SharedMemoryModel(Model):
cls._flush_cached_by_key(instance._get_pk_val(), force=force)
flush_cached_instance = classmethod(flush_cached_instance)
# per-instance methods
def set_recache_protection(self, mode=True):
    """
    Mark whether this instance is protected from being flushed out of
    the idmapper cache (True protects it, False makes it flushable).
    """
    # NOTE(review): renamed first arg `cls` -> `self`; this now sits in
    # the "per-instance methods" section (the classmethod variant was
    # removed in this commit), so the old name was misleading.
    self._idmapper_recache_protection = bool(mode)
def flush_instance_cache(cls, force=False):
"""
This will clean safe objects from the cache. Use force
@ -352,29 +357,30 @@ post_save.connect(update_cached_instance)
def cache_size(mb=True):
    """
    Calculate statistics about the idmapper cache.

    mb - if True, return sizes in MB rather than in bytes.

    Returns:
        total_num, total_size, {objclass_name: (num, size)}

    NOTE(review): this hunk interleaved the removed and added versions of
    the function (duplicate docstring text, two `getsize` bodies, stale
    `_total` bookkeeping); this is the reconstructed post-commit version.
    """
    import sys
    # [num_instances, total_size] -- a list so the nested closures can
    # mutate it in place.
    totals = [0, 0]
    sizedict = {}

    def getsize(model):
        "Return (num_cached_instances, size) for one model class."
        instances = model.get_all_cached_instances()
        num_inst = len(instances)
        # sys.getsizeof is shallow, so this underestimates real usage.
        size = sum(sys.getsizeof(o) for o in instances)
        # NOTE(review): the committed code divided by 1000.0, which yields
        # kB, not the MB the docstring promises; corrected to 1024*1024.
        size = size / (1024.0 * 1024.0) if mb else size
        return num_inst, size

    def get_recurse(submodels):
        "Depth-first accumulation over the model subclass tree."
        for submodel in submodels:
            subclasses = submodel.__subclasses__()
            if not subclasses:
                num_inst, size = getsize(submodel)
                totals[0] += num_inst
                totals[1] += size
                sizedict[submodel.__name__] = (num_inst, size)
            else:
                # NOTE(review): non-leaf classes are recursed into but never
                # measured themselves; instances cached directly on an
                # intermediate class are not counted -- confirm intended.
                get_recurse(subclasses)

    get_recurse(SharedMemoryModel.__subclasses__())
    return totals[0], totals[1], sizedict