Made the reload mechanism fully asynchronous. Work on improving cache operations.

This commit is contained in:
Griatch 2011-03-20 13:24:07 +00:00
parent 85e61bbf2d
commit e965830735
6 changed files with 113 additions and 59 deletions

View file

@ -19,7 +19,7 @@ class ObjManipCommand(MuxCommand):
some optional data, such as a typeclass or a location. A comma ','
separates different objects. Like this:
name1;alias;alias;alias:option, name2 ;alias;alias ...
name1;alias;alias;alias:option, name2;alias;alias ...
Spaces between all components are stripped.
@ -1360,7 +1360,7 @@ class CmdExamine(ObjManipCommand):
text = "%s[...]" % text[:line_width - headlen - 5]
return text
def format_attributes(self, obj, attrname=None):
def format_attributes(self, obj, attrname=None, crop=True):
"""
Helper function that returns info about attributes and/or
non-persistent data stored on object
@ -1382,12 +1382,14 @@ class CmdExamine(ObjManipCommand):
#self.caller.msg(db_attr)
string += "\n{wPersistent attributes{n:"
for attr, value in db_attr:
value = self.crop_line(value, attr)
if crop:
value = self.crop_line(value, attr)
string += "\n %s = %s" % (attr, value)
if ndb_attr and ndb_attr[0]:
string += "\n{wNon-persistent attributes{n:"
for attr, value in ndb_attr:
value = self.crop_line(value, attr)
if crop:
value = self.crop_line(value, attr)
string += "\n %s = %s" % (attr, value)
return string
@ -1472,14 +1474,16 @@ class CmdExamine(ObjManipCommand):
obj = caller.search(obj_name)
if not obj:
continue
if not obj.access(caller, 'examine'):
#If we don't have special info access, just look at the object instead.
caller.execute_cmd('look %s' % obj_name)
continue
if obj_attrs:
for attrname in obj_attrs:
# we are only interested in specific attributes
string += self.format_attributes(obj, attrname)
string += self.format_attributes(obj, attrname, crop=False)
else:
string += self.format_output(obj)
string = string.strip()

View file

@ -438,8 +438,8 @@ class CmdSay(MuxCommand):
emit_string = '{c%s{n says, "%s{n"' % (caller.name,
speech)
caller.location.msg_contents(emit_string,
exclude=caller)
exclude=caller)
## def cmd_fsay(command):
## """
## @fsay - make an object say something

View file

@ -38,29 +38,30 @@ class CmdReload(MuxCommand):
Reload the system.
"""
caller = self.caller
reloads.reload_modules()
reloads.start_reload_loop()
max_attempts = 4
for attempt in range(max_attempts):
# if reload modules take a long time,
# we might end up in a situation where
# the subsequent commands fail since they
# can't find the reloads module (due to it
# not yet fully loaded). So we retry a few
# times before giving up.
try:
reloads.reload_scripts()
reloads.reload_commands()
break
except AttributeError:
if attempt < max_attempts-1:
caller.msg(" Waiting for modules(s) to finish (%s) ..." % attempt)
else:
string = "{r ... The module(s) took too long to reload, "
string += "\n so the remaining reloads where skipped."
string += "\n Re-run @reload again when modules have fully "
string += "\n re-initialized.{n"
caller.msg(string)
#reloads.reload_modules()
# max_attempts = 4
# for attempt in range(max_attempts):
# # if reload modules take a long time,
# # we might end up in a situation where
# # the subsequent commands fail since they
# # can't find the reloads module (due to it
# # not yet fully loaded). So we retry a few
# # times before giving up.
# try:
# reloads.reload_scripts()
# reloads.reload_commands()
# break
# except AttributeError:
# if attempt < max_attempts-1:
# caller.msg(" Waiting for modules(s) to finish (%s) ..." % attempt)
# else:
# string = "{r ... The module(s) took too long to reload, "
# string += "\n so the remaining reloads where skipped."
# string += "\n Re-run @reload again when modules have fully "
# string += "\n re-initialized.{n"
# caller.msg(string)
class CmdPy(MuxCommand):
"""

View file

@ -341,7 +341,7 @@ class Character(Object):
Setup character-specific security
"""
super(Character, self).basetype_setup()
self.locks.add("puppet:id(%s) or perm(Immortals)" % self.dbobj.dbref)
self.locks.add("puppet:id(%s) or perm(Immortals); get:false()" % self.dbobj.dbref)
# add the default cmdset
from settings import CMDSET_DEFAULT
@ -374,6 +374,10 @@ class Room(Object):
Simple setup, shown as an example
(since default is None anyway)
"""
super(Room, self).basetype_setup()
self.locks.add("get:false()")
super(Room, self).basetype_setup()
self.location = None
@ -395,7 +399,7 @@ class Exit(Object):
"""
# the lock is open to all by default
super(Exit, self).basetype_setup()
self.locks.add("traverse:all()")
self.locks.add("traverse:all(); get:false()")
def at_object_creation(self):
"""

View file

@ -25,6 +25,9 @@ class SharedMemoryModelBase(ModelBase):
"""
def new_instance():
return super(SharedMemoryModelBase, cls).__call__(*args, **kwargs)
#if _get_full_cache:
# return cls.__instance_cache__.values()
instance_key = cls._get_cache_key(args, kwargs)
# depending on the arguments, we might not be able to infer the PK, so in that case we create a new instance
@ -39,6 +42,7 @@ class SharedMemoryModelBase(ModelBase):
return cached_instance
def _prepare(cls):
# this is the core cache
cls.__instance_cache__ = {} #WeakValueDictionary()
super(SharedMemoryModelBase, cls)._prepare()
@ -99,10 +103,15 @@ class SharedMemoryModel(Model):
cls.__instance_cache__[instance._get_pk_val()] = instance
#key = "%s-%s" % (cls, instance.pk)
#TCACHE[key] = instance
#print "cached: %s (%s: %s) (total cached: %s)" % (instance, cls.__name__, len(cls.__instance_cache__), len(TCACHE))
#print "cached: %s (%s: %s) (total cached: %s)" % (instance, cls.__name__, len(cls.__instance_cache__), len(TCACHE))
cache_instance = classmethod(cache_instance)
def get_all_cached_instances(cls):
"return the objects so far cached by idmapper for this class."
return cls.__instance_cache__.values()
get_all_cached_instances = classmethod(get_all_cached_instances)
def _flush_cached_by_key(cls, key):
del cls.__instance_cache__[key]
_flush_cached_by_key = classmethod(_flush_cached_by_key)

View file

@ -6,6 +6,7 @@ also not good to tie such important functionality to a user-definable
command class.
"""
import time
from django.db.models.loading import AppCache
from django.utils.datastructures import SortedDict
from django.conf import settings
@ -21,6 +22,40 @@ from src.comms import channelhandler
from src.comms.models import Channel
from src.utils import reimport, utils, logger
def start_reload_loop():
"""
This starts the asynchronous reset loop. While it is
important that it runs asynchronously (so as not to block the
mud while it is running), the order in which things are
updated does matter.
"""
def run_loop():
""
cemit_info('-'*50)
cemit_info(" Starting asynchronous server reload ...")
reload_modules() # this must be given time to finish
wait_time = 5
cemit_info(" Wait for %ss to give modules time to fully re-cache ..." % wait_time)
time.sleep(wait_time)
reload_scripts()
reload_commands()
reset_loop()
def at_return(r):
"default callback"
cemit_info(" Asynchronous server reload finished.\n" + '-'*50)
def at_err(e):
"error callback"
string = "%s\n reload: Asynchronous reset loop exited with an error." % e
string += "\n This might be harmless. Wait a moment then reload again to see if the problem persists."
cemit_info(string)
utils.run_async(run_loop, at_return, at_err)
def reload_modules():
"""
Reload modules that don't have any variables that can be reset.
@ -55,7 +90,7 @@ def reload_modules():
"Check so modpath is not in an unsafe module"
return not any(mpath.startswith(modpath) for mpath in unsafe_modules)
cemit_info('-'*50 +"\n Cleaning module caches ...")
cemit_info("\n Cleaning module caches ...")
# clean as much of the caches as we can
cache = AppCache()
@ -74,17 +109,15 @@ def reload_modules():
string = ""
if unsafe_dir_modified or unsafe_mod_modified:
string += "\n WARNING: Some modules can not be reloaded"
string += "\n since it would not be safe to do so.\n"
if unsafe_dir_modified:
string += "\n-The following module(s) is/are located in the src/ directory and"
string += "\n should not be reloaded without a server reboot:\n %s\n"
string += "\n-{rThe following changed module(s) can only be reloaded{n"
string += "\n {rby a server reboot:{n\n %s\n"
string = string % unsafe_dir_modified
if unsafe_mod_modified:
string += "\n-The following modules contains at least one Script class with a timer"
string += "\n component and which has already spawned instances - these cannot be "
string += "\n safely cleaned from memory on the fly. Stop all the affected scripts "
string += "\n or restart the server to safely reload:\n %s\n"
string += "\n-{rThe following modules contains at least one Script class with a timer{n"
string += "\n {rcomponent and has already spawned instances - these cannot be{n "
string += "\n {rsafely cleaned from memory on the fly. Stop all the affected scripts{n "
string += "\n {ror restart the server to safely reload:{n\n %s\n"
string = string % unsafe_mod_modified
if string:
cemit_info(string)
@ -92,9 +125,9 @@ def reload_modules():
if safe_modified:
cemit_info(" Reloading module(s):\n %s ..." % safe_modified)
reimport.reimport(*safe_modified)
cemit_info(" ...all safe modules reloaded.")
cemit_info(" ... all safe modules reloaded.")
else:
cemit_info(" Nothing was reloaded.")
cemit_info(" ... no modules could be (or needed to be) reloaded.")
# clean out cache dictionary of typeclasses, exits and channels
typeclassmodels.reset()
@ -103,18 +136,6 @@ def reload_modules():
# run through all objects in database, forcing re-caching.
cemit_info(" Starting asynchronous object reset loop ...")
def run_reset_loop():
# run a reset loop on all objects
[(o.cmdset.reset(), o.locks.reset()) for o in ObjectDB.objects.all()]
[s.locks.reset() for s in ScriptDB.objects.all()]
[p.locks.reset() for p in PlayerDB.objects.all()]
[h.locks.reset() for h in HelpEntry.objects.all()]
[m.locks.reset() for m in Msg.objects.all()]
[c.locks.reset() for c in Channel.objects.all()]
at_return = lambda r: cemit_info(" ... @reload: Asynchronous reset loop finished.")
at_err = lambda e: cemit_info("%s\nreload: Asynchronous reset loop exited with an error. This might be harmless and just due to some modules or scripts not having had time to restart before being called by the reset loop. Wait a moment then reload again to see if the problem persists." % e)
utils.run_async(run_reset_loop, at_return, at_err)
def reload_scripts(scripts=None, obj=None, key=None,
dbref=None, init_mode=False):
@ -141,7 +162,22 @@ def reload_scripts(scripts=None, obj=None, key=None,
def reload_commands():
from src.commands import cmdsethandler
cmdsethandler.CACHED_CMDSETS = {}
cemit_info(" Cleaned cmdset cache.\n" + '-'*50)
cemit_info(" Cleaned cmdset cache.")
def reset_loop():
"Reload and restart all entities that can be reloaded."
# run the reset loop on all objects
cemit_info(" Running resets on database entities ...")
t1 = time.time()
[s.locks.reset() for s in ScriptDB.objects.all()]
[p.locks.reset() for p in PlayerDB.objects.all()]
[h.locks.reset() for h in HelpEntry.objects.all()]
[m.locks.reset() for m in Msg.objects.all()]
[c.locks.reset() for c in Channel.objects.all()]
[(o.typeclass(o), o.cmdset.reset(), o.locks.reset()) for o in ObjectDB.get_all_cached_instances()]
t2 = time.time()
cemit_info(" ... Loop finished in %g seconds." % (t2-t1))
def cemit_info(message):
"""
@ -158,8 +194,8 @@ def cemit_info(message):
pass
if infochan:
cname = infochan.key
cmessage = "\n".join(["[%s]: %s" % (cname, line) for line in message.split('\n')])
cmessage = "\n".join(["[%s][reload]: %s" % (cname, line) for line in message.split('\n')])
infochan.msg(cmessage)
else:
cmessage = "\n".join(["[NO MUDINFO CHANNEL]: %s" % line for line in message.split('\n')])
cmessage = "\n".join(["[MUDINFO][reload] %s" % line for line in message.split('\n')])
logger.log_infomsg(cmessage)