Updated to a supported idmapper version. Added a method for calculating the cache usage of the idmapper, and tied it to the @system command.

This commit is contained in:
Griatch 2012-04-30 00:51:36 +02:00
parent 571c7a3cab
commit e82515f8cb
5 changed files with 138 additions and 60 deletions

View file

@ -16,6 +16,10 @@ from src.players.models import PlayerDB
from src.utils import logger, utils, gametime, create
from src.commands.default.muxcommand import MuxCommand
_resource = None
_idmapper = None
# limit symbol import for API
__all__ = ("CmdReload", "CmdReset", "CmdShutdown", "CmdPy",
"CmdScripts", "CmdObjects", "CmdService", "CmdVersion",
@ -540,7 +544,8 @@ class CmdServerLoad(MuxCommand):
Show server load statistics in a table.
"""
key = "@serverload"
key = "@server"
aliases = ["@serverload", "@serverprocess"]
locks = "cmd:perm(list) or perm(Immortals)"
help_category = "System"
@ -554,9 +559,15 @@ class CmdServerLoad(MuxCommand):
if not utils.host_os_is('posix'):
string = "Process listings are only available under Linux/Unix."
else:
global _resource, _idmapper
if not _resource:
import resource as _resource
if not _idmapper:
from src.utils.idmapper import base as _idmapper
import resource
loadavg = os.getloadavg()
psize = resource.getpagesize()
psize = _resource.getpagesize()
pid = os.getpid()
rmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "rss")).read()) / 1024.0
vmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "vsz")).read()) / 1024.0
@ -579,8 +590,8 @@ class CmdServerLoad(MuxCommand):
"%s (%gs)" % (utils.time_format(rusage.ru_utime), rusage.ru_utime),
#"%10d shared" % rusage.ru_ixrss,
#"%10d pages" % rusage.ru_maxrss,
"%10d Mb" % rmem,
"%10d Mb" % vmem,
"%10.2f MB" % rmem,
"%10.2f MB" % vmem,
"%10d hard" % rusage.ru_majflt,
"%10d reads" % rusage.ru_inblock,
"%10d in" % rusage.ru_msgrcv,
@ -611,6 +622,18 @@ class CmdServerLoad(MuxCommand):
for row in ftable:
string += "\n " + "{w%s{n" % row[0] + "".join(row[1:])
# cache size
cachedict = _idmapper.cache_size()
totcache = cachedict["_total"]
string += "\n{w Object cache usage: %5.2f MB (%i items){n" % (totcache[1], totcache[0])
sorted_cache = sorted([(key, tup[0], tup[1]) for key, tup in cachedict.items() if key !="_total" and tup[0] > 0],
key=lambda tup: tup[2], reverse=True)
table = [[tup[0] for tup in sorted_cache],
["%5.2f MB" % tup[2] for tup in sorted_cache],
["%i item(s)" % tup[1] for tup in sorted_cache]]
ftable = utils.format_table(table, 5)
for row in ftable:
string += "\n " + row[0] + row[1] + row[2]
caller.msg(string)
# class CmdPs(MuxCommand):

View file

@ -1,2 +0,0 @@
include setup.py README.rst LICENSE MANIFEST.in
global-exclude *~

12
src/utils/idmapper/README.rst Normal file → Executable file
View file

@ -1,3 +1,13 @@
This fork of django-idmapper fixes some bugs that prevented the idmapper from
being used in many instances. In particular, the caching manager is now inherited
by SharedMemoryManager subclasses, and it is used when Django uses an automatic
manager (see http://docs.djangoproject.com/en/dev/topics/db/managers/#controlling-automatic-manager-types). This means access through foreign keys now uses
identity mapping.
Tested with Django version 1.2 alpha 1 SVN-12375.
My modifications are usually accompanied by comments marked with "CL:".
Django Identity Mapper
======================
@ -17,7 +27,7 @@ For example, if you want to simply mark all of your models as a SharedMemoryMode
class MyModel(models.SharedMemoryModel):
name = models.CharField(...)
Because the system is isolated, you may mix and match SharedMemoryModel's with regular Model's.
Because the system is isolated, you may mix and match SharedMemoryModels with regular Models. The module idmapper.models imports everything from django.db.models and only adds SharedMemoryModel, so you can simply replace your import of models from django.db.
::
from idmapper import models

View file

@ -1,29 +1,24 @@
"""
This is mostly unmodified from the original idmapper.
Django ID mapper
Evennia changes:
The cache mechanism was changed from a WeakValueDictionary to a
normal dictionary. The old way caused very hard-to-diagnose bugs
over long periods of time (which Evennia requires)
added save() overloading mechanism to update cache
added get_all_cached_instances() for convenient access to objects
Modified for Evennia by making sure that no model references
leave caching unexpectedly (no use of WeakRefs).
Also adds cache_size() for monitoring the size of the cache.
"""
from django.db.models.base import Model, ModelBase
from django.db.models.signals import post_save, pre_delete, \
post_syncdb
from manager import SharedMemoryManager
class SharedMemoryModelBase(ModelBase):
#def __new__(cls, name, bases, attrs):
# super_new = super(ModelBase, cls).__new__
# parents = [b for b in bases if isinstance(b, SharedMemoryModelBase)]
# if not parents:
# # If this isn't a subclass of Model, don't do anything special.
# print "not a subclass of Model", name, bases
# return super_new(cls, name, bases, attrs)
# return super(SharedMemoryModelBase, cls).__new__(cls, name, bases, attrs)
# CL: upstream had a __new__ method that skipped ModelBase's __new__ if
# SharedMemoryModelBase was not in the model class's ancestors. It's not
# clear what was the intended purpose, but skipping ModelBase.__new__
# broke things; in particular, default manager inheritance.
def __call__(cls, *args, **kwargs):
"""
@ -35,9 +30,6 @@ class SharedMemoryModelBase(ModelBase):
def new_instance():
return super(SharedMemoryModelBase, cls).__call__(*args, **kwargs)
#if _get_full_cache:
# return cls.__instance_cache__.values()
instance_key = cls._get_cache_key(args, kwargs)
# depending on the arguments, we might not be able to infer the PK, so in that case we create a new instance
if instance_key is None:
@ -51,17 +43,17 @@ class SharedMemoryModelBase(ModelBase):
return cached_instance
def _prepare(cls):
# this is the core cache
cls.__instance_cache__ = {} #WeakValueDictionary()
cls.__instance_cache__ = {} #WeakValueDictionary()
super(SharedMemoryModelBase, cls)._prepare()
class SharedMemoryModel(Model):
# XXX: this is creating a model and it shouldn't be.. how do we properly
# subclass now?
# CL: setting abstract correctly to allow subclasses to inherit the default
# manager.
__metaclass__ = SharedMemoryModelBase
objects = SharedMemoryManager()
class Meta:
abstract = True
@ -117,9 +109,11 @@ class SharedMemoryModel(Model):
return cls.__instance_cache__.values()
get_all_cached_instances = classmethod(get_all_cached_instances)
def _flush_cached_by_key(cls, key):
del cls.__instance_cache__[key]
try:
del cls.__instance_cache__[key]
except KeyError:
pass
_flush_cached_by_key = classmethod(_flush_cached_by_key)
def flush_cached_instance(cls, instance):
@ -128,31 +122,59 @@ class SharedMemoryModel(Model):
since this is most likely called from delete(), and we want to make sure we don't cache dead objects.
"""
cls._flush_cached_by_key(instance._get_pk_val())
#key = "%s-%s" % (cls, instance.pk)
#print "uncached: %s (%s: %s) (total cached: %s)" % (instance, cls.__name__, len(cls.__instance_cache__), len(TCACHE))
flush_cached_instance = classmethod(flush_cached_instance)
def save(self, *args, **kwargs):
    """
    Save the model to the database, then re-cache this instance so
    the idmapper cache always reflects the stored state.
    """
    #ssave = super(SharedMemoryModel, self).save
    #reactor.callInThread(ssave, *args, **kwargs)
    super(SharedMemoryModel, self).save(*args, **kwargs)
    self.__class__.cache_instance(self)
# TODO: This needs moved to the prepare stage (I believe?)
objects = SharedMemoryManager()
from django.db.models.signals import pre_delete
def flush_instance_cache(cls):
    """Empty this class's entire idmapper instance cache."""
    cls.__instance_cache__ = {} #WeakValueDictionary()
flush_instance_cache = classmethod(flush_instance_cache)
# Use a signal so we make sure to catch cascades.
def flush_singleton_cache(sender, instance, **kwargs):
    """
    pre_delete signal handler: drop a deleted SharedMemoryModel
    instance from the cache. A signal is used so cascading deletes
    are caught as well (see comment above).
    """
    # XXX: Is this the best way to make sure we can flush?
    if isinstance(instance, SharedMemoryModel):
        instance.__class__.flush_cached_instance(instance)
pre_delete.connect(flush_singleton_cache)

def flush_cache(**kwargs):
    """Flush the instance cache of every SharedMemoryModel subclass."""
    for model in SharedMemoryModel.__subclasses__():
        model.flush_instance_cache()
#request_finished.connect(flush_cache)
post_syncdb.connect(flush_cache)
def flush_cached_instance(sender, instance, **kwargs):
    """
    pre_delete signal handler: remove a deleted instance from the
    idmapper cache so stale objects are not served after deletion.
    """
    # XXX: Is this the best way to make sure we can flush?
    if not hasattr(instance, 'flush_cached_instance'):
        # not a cached (SharedMemoryModel-style) model - nothing to do
        return
    sender.flush_cached_instance(instance)
pre_delete.connect(flush_cached_instance)
def update_cached_instance(sender, instance, **kwargs):
    """
    post_save signal handler: (re-)cache an instance after it is
    saved, keeping the idmapper cache in sync with the database.
    """
    if not hasattr(instance, 'cache_instance'):
        # not a cached (SharedMemoryModel-style) model - nothing to do
        return
    sender.cache_instance(instance)
post_save.connect(update_cached_instance)
def cache_size(mb=True):
    """
    Estimate the idmapper cache usage per model class.

    Walks all leaf subclasses of SharedMemoryModel and sums
    sys.getsizeof() over each class's cached instances.

    Args:
        mb: if True (default), report sizes in MB (bytes / 1024**2),
            otherwise in raw bytes.

    Returns:
        dict mapping model class name -> (num_items, size), plus a
        "_total" key holding the aggregated (num_items, size) tuple.

    NOTE: sys.getsizeof is shallow (it does not follow references),
    so this underestimates the true memory footprint.
    """
    import sys
    sizedict = {"_total": [0, 0]}

    def getsize(model):
        # (item count, cache size) for a single model class
        instances = model.get_all_cached_instances()
        nitems = len(instances)
        size = sum(sys.getsizeof(obj) for obj in instances)
        if mb:
            # bytes -> MB. The previous code divided by 1024.0 only
            # once, which yields KB while callers label the value "MB".
            size /= (1024.0 * 1024.0)
        return (nitems, size)

    def get_recurse(submodels):
        # recurse to leaf subclasses; only those hold cached instances
        for submodel in submodels:
            subclasses = submodel.__subclasses__()
            if not subclasses:
                tup = getsize(submodel)
                sizedict["_total"][0] += tup[0]
                sizedict["_total"][1] += tup[1]
                sizedict[submodel.__name__] = tup
            else:
                get_recurse(subclasses)

    get_recurse(SharedMemoryModel.__subclasses__())
    sizedict["_total"] = tuple(sizedict["_total"])
    return sizedict
# XXX: It's to be determined if we should use this or not.
# def update_singleton_cache(sender, instance, **kwargs):
# if isinstance(instance.__class__, SharedMemoryModel):
# instance.__class__.cache_instance(instance)
# post_save.connect(flush_singleton_cache)

View file

@ -1,6 +1,26 @@
from django.db.models.manager import Manager
try:
from django.db import router
except:
pass
class SharedMemoryManager(Manager):
# CL: this ensures our manager is used when accessing instances via
# ForeignKey etc. (see docs)
use_for_related_fields = True
# CL: in the dev version of django, ReverseSingleRelatedObjectDescriptor
# will call us as:
# rel_obj = rel_mgr.using(db).get(**params)
# We need to handle using, or the get method will be called on a vanilla
# queryset, and we won't get a chance to use the cache.
def using(self, alias):
    # CL: if the requested alias is the database the router would pick
    # for reads anyway, return this manager itself so the cache-aware
    # get() is used rather than a plain queryset (see comment above on
    # how ReverseSingleRelatedObjectDescriptor calls us).
    if alias == router.db_for_read(self.model):
        return self
    else:
        return super(SharedMemoryManager, self).using(alias)
# TODO: improve on this implementation
# We need a way to handle reverse lookups so that this model can
# still use the singleton cache, but the active model isn't required
@ -8,8 +28,13 @@ class SharedMemoryManager(Manager):
def get(self, **kwargs):
items = kwargs.keys()
inst = None
if len(items) == 1 and items[0] in ('pk', self.model._meta.pk.attname):
inst = self.model.get_cached_instance(kwargs[items[0]])
if len(items) == 1:
# CL: support __exact
key = items[0]
if key.endswith('__exact'):
key = key[:-len('__exact')]
if key in ('pk', self.model._meta.pk.attname):
inst = self.model.get_cached_instance(kwargs[items[0]])
if inst is None:
inst = super(SharedMemoryManager, self).get(**kwargs)
return inst
return inst