mirror of
https://github.com/evennia/evennia.git
synced 2026-03-30 04:27:16 +02:00
Added the ability to clear an object from the global cache. This is rarely
needed (and can be potentially dangerous if the object depends on certain startup methods to run and/or holds temporary attributes on itself - these will all be lost due to a new instance being created). It is however necessary when it comes to renaming Exits - since the Command on the exit must then change name too, recaching the Exit will also update the command. Resolves issue 223.
This commit is contained in:
parent
91ec33b9a7
commit
464aa8ca9e
3 changed files with 38 additions and 28 deletions
|
|
@ -1015,6 +1015,9 @@ class CmdName(ObjManipCommand):
|
|||
if aliases:
|
||||
obj.aliases = aliases
|
||||
astring = " (%s)" % (", ".join(aliases))
|
||||
# fix for exits - we need their exit-command to change name too
|
||||
if obj.destination:
|
||||
obj.flush_from_cache()
|
||||
caller.msg("Object's name changed to '%s'%s." % (newname, astring))
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ from src.server.models import ServerConfig
|
|||
from src.typeclasses import managers
|
||||
from src.locks.lockhandler import LockHandler
|
||||
from src.utils import logger, utils
|
||||
from src.utils.utils import make_iter, is_iter, has_parent, to_unicode, to_str
|
||||
from src.utils.utils import make_iter, is_iter, to_unicode, to_str
|
||||
|
||||
__all__ = ("Attribute", "TypeNick", "TypedObject")
|
||||
|
||||
|
|
@ -1447,3 +1447,11 @@ class TypedObject(SharedMemoryModel):
|
|||
return any(True for hpos, hperm in enumerate(_PERMISSION_HIERARCHY)
|
||||
if hperm in [p.lower() for p in self.permissions] and hpos > ppos)
|
||||
return False
|
||||
|
||||
def flush_from_cache(self):
|
||||
"""
|
||||
Flush this object instance from cache, forcing an object reload. Note that this
|
||||
will kill all temporary attributes on this object since it will be recreated
|
||||
as a new Typeclass instance.
|
||||
"""
|
||||
self.__class__.flush_cached_instance(self)
|
||||
|
|
|
|||
|
|
@ -1,18 +1,17 @@
|
|||
"""
|
||||
This is mostly unmodified from the original idmapper.
|
||||
This is mostly unmodified from the original idmapper.
|
||||
|
||||
Evennia changes:
|
||||
The cache mechanism was changed from a WeakValueDictionary to a
|
||||
The cache mechanism was changed from a WeakValueDictionary to a
|
||||
normal dictionary. The old way caused very hard-to-diagnose bugs
|
||||
over long periods of time (which Evennia requires)
|
||||
|
||||
added save() overloading mechanism to update cache
|
||||
added save() overloading mechanism to update cache
|
||||
|
||||
added get_all_cached_instances() for convenient access to objects
|
||||
|
||||
"""
|
||||
|
||||
from twisted.internet import reactor
|
||||
from django.db.models.base import Model, ModelBase
|
||||
from manager import SharedMemoryManager
|
||||
|
||||
|
|
@ -29,8 +28,8 @@ class SharedMemoryModelBase(ModelBase):
|
|||
def __call__(cls, *args, **kwargs):
|
||||
"""
|
||||
this method will either create an instance (by calling the default implementation)
|
||||
or try to retrieve one from the class-wide cache by infering the pk value from
|
||||
args and kwargs. If instance caching is enabled for this class, the cache is
|
||||
or try to retrieve one from the class-wide cache by infering the pk value from
|
||||
args and kwargs. If instance caching is enabled for this class, the cache is
|
||||
populated whenever possible (ie when it is possible to infer the pk value).
|
||||
"""
|
||||
def new_instance():
|
||||
|
|
@ -38,7 +37,7 @@ class SharedMemoryModelBase(ModelBase):
|
|||
|
||||
#if _get_full_cache:
|
||||
# return cls.__instance_cache__.values()
|
||||
|
||||
|
||||
instance_key = cls._get_cache_key(args, kwargs)
|
||||
# depending on the arguments, we might not be able to infer the PK, so in that case we create a new instance
|
||||
if instance_key is None:
|
||||
|
|
@ -52,11 +51,11 @@ class SharedMemoryModelBase(ModelBase):
|
|||
return cached_instance
|
||||
|
||||
def _prepare(cls):
|
||||
# this is the core cache
|
||||
# this is the core cache
|
||||
cls.__instance_cache__ = {} #WeakValueDictionary()
|
||||
super(SharedMemoryModelBase, cls)._prepare()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class SharedMemoryModel(Model):
|
||||
# XXX: this is creating a model and it shouldn't be.. how do we properly
|
||||
|
|
@ -64,12 +63,12 @@ class SharedMemoryModel(Model):
|
|||
__metaclass__ = SharedMemoryModelBase
|
||||
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
abstract = True
|
||||
|
||||
def _get_cache_key(cls, args, kwargs):
|
||||
"""
|
||||
This method is used by the caching subsystem to infer the PK value from the constructor arguments.
|
||||
It is used to decide if an instance has to be built or is already in the cache.
|
||||
This method is used by the caching subsystem to infer the PK value from the constructor arguments.
|
||||
It is used to decide if an instance has to be built or is already in the cache.
|
||||
"""
|
||||
result = None
|
||||
# Quick hack for my composites work for now.
|
||||
|
|
@ -78,18 +77,18 @@ class SharedMemoryModel(Model):
|
|||
else:
|
||||
pk = cls._meta.pk
|
||||
# get the index of the pk in the class fields. this should be calculated *once*, but isn't atm
|
||||
pk_position = cls._meta.fields.index(pk)
|
||||
if len(args) > pk_position:
|
||||
# if it's in the args, we can get it easily by index
|
||||
pk_position = cls._meta.fields.index(pk)
|
||||
if len(args) > pk_position:
|
||||
# if it's in the args, we can get it easily by index
|
||||
result = args[pk_position]
|
||||
elif pk.attname in kwargs:
|
||||
# retrieve the pk value. Note that we use attname instead of name, to handle the case where the pk is a
|
||||
# retrieve the pk value. Note that we use attname instead of name, to handle the case where the pk is a
|
||||
# a ForeignKey.
|
||||
result = kwargs[pk.attname]
|
||||
elif pk.name != pk.attname and pk.name in kwargs:
|
||||
# ok we couldn't find the value, but maybe it's a FK and we can find the corresponding object instead
|
||||
result = kwargs[pk.name]
|
||||
|
||||
|
||||
if result is not None and isinstance(result, Model):
|
||||
# if the pk value happens to be a model instance (which can happen wich a FK), we'd rather use its own pk as the key
|
||||
result = result._get_pk_val()
|
||||
|
|
@ -98,8 +97,8 @@ class SharedMemoryModel(Model):
|
|||
|
||||
def get_cached_instance(cls, id):
|
||||
"""
|
||||
Method to retrieve a cached instance by pk value. Returns None when not found
|
||||
(which will always be the case when caching is disabled for this class). Please
|
||||
Method to retrieve a cached instance by pk value. Returns None when not found
|
||||
(which will always be the case when caching is disabled for this class). Please
|
||||
note that the lookup will be done even when instance caching is disabled.
|
||||
"""
|
||||
return cls.__instance_cache__.get(id)
|
||||
|
|
@ -110,7 +109,7 @@ class SharedMemoryModel(Model):
|
|||
Method to store an instance in the cache.
|
||||
"""
|
||||
if instance._get_pk_val() is not None:
|
||||
cls.__instance_cache__[instance._get_pk_val()] = instance
|
||||
cls.__instance_cache__[instance._get_pk_val()] = instance
|
||||
cache_instance = classmethod(cache_instance)
|
||||
|
||||
def get_all_cached_instances(cls):
|
||||
|
|
@ -120,20 +119,20 @@ class SharedMemoryModel(Model):
|
|||
|
||||
|
||||
def _flush_cached_by_key(cls, key):
|
||||
del cls.__instance_cache__[key]
|
||||
del cls.__instance_cache__[key]
|
||||
_flush_cached_by_key = classmethod(_flush_cached_by_key)
|
||||
|
||||
|
||||
def flush_cached_instance(cls, instance):
|
||||
"""
|
||||
Method to flush an instance from the cache. The instance will always be flushed from the cache,
|
||||
Method to flush an instance from the cache. The instance will always be flushed from the cache,
|
||||
since this is most likely called from delete(), and we want to make sure we don't cache dead objects.
|
||||
"""
|
||||
cls._flush_cached_by_key(instance._get_pk_val())
|
||||
#key = "%s-%s" % (cls, instance.pk)
|
||||
#key = "%s-%s" % (cls, instance.pk)
|
||||
#print "uncached: %s (%s: %s) (total cached: %s)" % (instance, cls.__name__, len(cls.__instance_cache__), len(TCACHE))
|
||||
|
||||
flush_cached_instance = classmethod(flush_cached_instance)
|
||||
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
#ssave = super(SharedMemoryModel, self).save
|
||||
#reactor.callInThread(ssave, *args, **kwargs)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue