A first test using PickledObjectField and a rewritten dbserialize module to store Attributes. No migrations set up yet.

This commit is contained in:
Griatch 2013-04-13 15:15:02 +02:00
parent 4d5cd5352a
commit 75341ade6f
7 changed files with 678 additions and 140 deletions

View file

@ -156,7 +156,7 @@ try:
test = ObjectDB.objects.get(id=1)
except ObjectDB.DoesNotExist:
pass # this is fine at this point
except DatabaseError:
except DatabaseError,e:
print """
Your database does not seem to be set up correctly.
@ -172,6 +172,7 @@ except DatabaseError:
When you have a database set up, rerun evennia.py.
"""
print e
sys.exit()
# Add this to the environmental variable for the 'twistd' command.

View file

@ -195,6 +195,7 @@ class ObjectDB(TypedObject):
# database storage of persistant cmdsets.
db_cmdset_storage = models.CharField('cmdset', max_length=255, null=True, blank=True,
help_text="optional python path to a cmdset class.")
# Database manager
objects = ObjectManager()

View file

@ -173,7 +173,6 @@ class ServerSession(Session):
"""
self.sessionhandler.data_out(self, msg, data)
def oob_data_in(self, data):
"""
This receives out-of-band data from the Portal.

View file

@ -48,6 +48,8 @@ from src.typeclasses import managers
from src.locks.lockhandler import LockHandler
from src.utils import logger, utils
from src.utils.utils import make_iter, is_iter, to_unicode, to_str
from src.utils.dbserialize import to_pickle, from_pickle
from src.utils.picklefield import PickledObjectField
__all__ = ("Attribute", "TypeNick", "TypedObject")
@ -322,6 +324,7 @@ class Attribute(SharedMemoryModel):
db_key = models.CharField('key', max_length=255, db_index=True)
# access through the value property
db_value = models.TextField('value', blank=True, null=True)
db_value2 = PickledObjectField('value2', null=True)
# Lock storage
db_lock_storage = models.TextField('locks', blank=True)
# references the object the attribute is linked to (this is set
@ -409,29 +412,44 @@ class Attribute(SharedMemoryModel):
"""
if self.no_cache:
# re-create data from database and cache it
try:
value = self.__from_attr(_PLOADS(to_str(self.db_value)))
except pickle.UnpicklingError:
value = self.db_value
value = from_pickle(self.db_value2, db_obj=self)
self.cached_value = value
self.no_cache = False
return value
else:
# normally the memory cache holds the latest data so no db access is needed.
return self.cached_value
#if self.no_cache:
# # re-create data from database and cache it
# try:
# value = self.__from_attr(_PLOADS(to_str(self.db_value)))
# except pickle.UnpicklingError:
# value = self.db_value
# self.cached_value = value
# self.no_cache = False
# return value
#else:
# # normally the memory cache holds the latest data so no db access is needed.
# return self.cached_value
#@value.setter
def __value_set(self, new_value):
    """
    Setter. Allows for self.value = value. We make sure to cache everything.
    """
    # serialize to a pickle-safe form (database objects -> packed tuples)
    to_store = to_pickle(new_value)
    # cache the re-hydrated value so reads need no db access; from_pickle
    # also wraps mutables as Saver* types bound to this Attribute
    self.cached_value = from_pickle(to_store, db_obj=self)
    self.no_cache = False
    # db_value2 is the new PickledObjectField storage
    # NOTE(review): the legacy db_value field is no longer written here
    self.db_value2 = to_store
    self.save()
    # call attribute hook
    self.at_set(self.cached_value)
    #new_value = self.__to_attr(new_value)
    #self.cached_value = self.__from_attr(new_value)
    #self.no_cache = False
    #self.db_value = to_unicode(_PDUMPS(to_str(new_value)))
    #self.save()
    ## call attribute hook
    #self.at_set(new_value)
#@value.deleter
def __value_del(self):
@ -470,148 +488,148 @@ class Attribute(SharedMemoryModel):
# operators on various data
def __to_attr(self, data):
"""
Convert data to proper attr data format before saving
#def __to_attr(self, data):
# """
# Convert data to proper attr data format before saving
We have to make sure to not store database objects raw, since
this will crash the system. Instead we must store their IDs
and make sure to convert back when the attribute is read back
later.
# We have to make sure to not store database objects raw, since
# this will crash the system. Instead we must store their IDs
# and make sure to convert back when the attribute is read back
# later.
Due to this it's criticial that we check all iterables
recursively, converting all found database objects to a form
the database can handle. We handle lists, tuples and dicts
(and any nested combination of them) this way, all other
iterables are stored and returned as lists.
# Due to this it's criticial that we check all iterables
# recursively, converting all found database objects to a form
# the database can handle. We handle lists, tuples and dicts
# (and any nested combination of them) this way, all other
# iterables are stored and returned as lists.
data storage format:
(simple|dbobj|iter, <data>)
where
simple - a single non-db object, like a string or number
dbobj - a single dbobj
iter - any iterable object - will be looped over recursively
to convert dbobj->id.
# data storage format:
# (simple|dbobj|iter, <data>)
# where
# simple - a single non-db object, like a string or number
# dbobj - a single dbobj
# iter - any iterable object - will be looped over recursively
# to convert dbobj->id.
"""
# """
def iter_db2id(item):
"""
recursively looping through stored iterables, replacing objects with ids.
(Python only builds nested functions once, so there is no overhead for nesting)
"""
dtype = type(item)
if dtype in (basestring, int, float): # check the most common types first, for speed
return item
elif hasattr(item, "id") and hasattr(item, "_db_model_name") and hasattr(item, "db_key"):
db_model_name = item._db_model_name # don't use _GA here, could be typeclass
if db_model_name == "typeclass":
db_model_name = _GA(item.dbobj, "_db_model_name")
return PackedDBobject(item.id, db_model_name, item.db_key)
elif dtype == tuple:
return tuple(iter_db2id(val) for val in item)
elif dtype in (dict, PackedDict):
return dict((key, iter_db2id(val)) for key, val in item.items())
elif dtype in (set, PackedSet):
return set(iter_db2id(val) for val in item)
elif hasattr(item, '__iter__'):
return list(iter_db2id(val) for val in item)
else:
return item
# def iter_db2id(item):
# """
# recursively looping through stored iterables, replacing objects with ids.
# (Python only builds nested functions once, so there is no overhead for nesting)
# """
# dtype = type(item)
# if dtype in (basestring, int, float): # check the most common types first, for speed
# return item
# elif hasattr(item, "id") and hasattr(item, "_db_model_name") and hasattr(item, "db_key"):
# db_model_name = item._db_model_name # don't use _GA here, could be typeclass
# if db_model_name == "typeclass":
# db_model_name = _GA(item.dbobj, "_db_model_name")
# return PackedDBobject(item.id, db_model_name, item.db_key)
# elif dtype == tuple:
# return tuple(iter_db2id(val) for val in item)
# elif dtype in (dict, PackedDict):
# return dict((key, iter_db2id(val)) for key, val in item.items())
# elif dtype in (set, PackedSet):
# return set(iter_db2id(val) for val in item)
# elif hasattr(item, '__iter__'):
# return list(iter_db2id(val) for val in item)
# else:
# return item
dtype = type(data)
# dtype = type(data)
if dtype in (basestring, int, float):
return ("simple",data)
elif hasattr(data, "id") and hasattr(data, "_db_model_name") and hasattr(data, 'db_key'):
# all django models (objectdb,scriptdb,playerdb,channel,msg,typeclass)
# have the protected property _db_model_name hardcoded on themselves for speed.
db_model_name = data._db_model_name # don't use _GA here, could be typeclass
if db_model_name == "typeclass":
# typeclass cannot help us, we want the actual child object model name
db_model_name = _GA(data.dbobj,"_db_model_name")
return ("dbobj", PackedDBobject(data.id, db_model_name, data.db_key))
elif hasattr(data, "__iter__"):
return ("iter", iter_db2id(data))
else:
return ("simple", data)
# if dtype in (basestring, int, float):
# return ("simple",data)
# elif hasattr(data, "id") and hasattr(data, "_db_model_name") and hasattr(data, 'db_key'):
# # all django models (objectdb,scriptdb,playerdb,channel,msg,typeclass)
# # have the protected property _db_model_name hardcoded on themselves for speed.
# db_model_name = data._db_model_name # don't use _GA here, could be typeclass
# if db_model_name == "typeclass":
# # typeclass cannot help us, we want the actual child object model name
# db_model_name = _GA(data.dbobj,"_db_model_name")
# return ("dbobj", PackedDBobject(data.id, db_model_name, data.db_key))
# elif hasattr(data, "__iter__"):
# return ("iter", iter_db2id(data))
# else:
# return ("simple", data)
def __from_attr(self, datatuple):
"""
Retrieve data from a previously stored attribute. This
is always a dict with keys type and data.
#def __from_attr(self, datatuple):
# """
# Retrieve data from a previously stored attribute. This
# is always a dict with keys type and data.
datatuple comes from the database storage and has
the following format:
(simple|dbobj|iter, <data>)
where
simple - a single non-db object, like a string. is returned as-is.
dbobj - a single dbobj-id. This id is retrieved back from the database.
iter - an iterable. This is traversed iteratively, converting all found
dbobj-ids back to objects. Also, all lists and dictionaries are
returned as their PackedList/PackedDict counterparts in order to
allow in-place assignment such as obj.db.mylist[3] = val. Mylist
is then a PackedList that saves the data on the fly.
"""
# nested functions
def id2db(data):
"""
Convert db-stored dbref back to object
"""
mclass = _CTYPEGET(model=data.db_model).model_class()
try:
return mclass.objects.dbref_search(data.id)
# datatuple comes from the database storage and has
# the following format:
# (simple|dbobj|iter, <data>)
# where
# simple - a single non-db object, like a string. is returned as-is.
# dbobj - a single dbobj-id. This id is retrieved back from the database.
# iter - an iterable. This is traversed iteratively, converting all found
# dbobj-ids back to objects. Also, all lists and dictionaries are
# returned as their PackedList/PackedDict counterparts in order to
# allow in-place assignment such as obj.db.mylist[3] = val. Mylist
# is then a PackedList that saves the data on the fly.
# """
# # nested functions
# def id2db(data):
# """
# Convert db-stored dbref back to object
# """
# mclass = _CTYPEGET(model=data.db_model).model_class()
# try:
# return mclass.objects.dbref_search(data.id)
except AttributeError:
try:
return mclass.objects.get(id=data.id)
except mclass.DoesNotExist: # could happen if object was deleted in the interim.
return None
# except AttributeError:
# try:
# return mclass.objects.get(id=data.id)
# except mclass.DoesNotExist: # could happen if object was deleted in the interim.
# return None
def iter_id2db(item, parent=None):
"""
Recursively looping through stored iterables, replacing ids with actual objects.
We return PackedDict and PackedLists instead of normal lists; this is needed in order for
the user to do dynamic saving of nested in-place, such as obj.db.attrlist[2]=3. What is
stored in the database are however always normal python primitives.
"""
dtype = type(item)
if dtype in (basestring, int, float): # check the most common types first, for speed
return item
elif dtype == PackedDBobject:
return id2db(item)
elif dtype == tuple:
return tuple([iter_id2db(val) for val in item])
elif dtype in (dict, PackedDict):
pdict = PackedDict(self)
pdict.update(dict(zip([key for key in item.keys()],
[iter_id2db(val, pdict) for val in item.values()])))
pdict.parent = parent
return pdict
elif dtype in (set, PackedSet):
pset = PackedSet(self)
pset.update(set(iter_id2db(val) for val in item))
return pset
elif hasattr(item, '__iter__'):
plist = PackedList(self)
plist.extend(list(iter_id2db(val, plist) for val in item))
plist.parent = parent
return plist
else:
return item
# def iter_id2db(item, parent=None):
# """
# Recursively looping through stored iterables, replacing ids with actual objects.
# We return PackedDict and PackedLists instead of normal lists; this is needed in order for
# the user to do dynamic saving of nested in-place, such as obj.db.attrlist[2]=3. What is
# stored in the database are however always normal python primitives.
# """
# dtype = type(item)
# if dtype in (basestring, int, float): # check the most common types first, for speed
# return item
# elif dtype == PackedDBobject:
# return id2db(item)
# elif dtype == tuple:
# return tuple([iter_id2db(val) for val in item])
# elif dtype in (dict, PackedDict):
# pdict = PackedDict(self)
# pdict.update(dict(zip([key for key in item.keys()],
# [iter_id2db(val, pdict) for val in item.values()])))
# pdict.parent = parent
# return pdict
# elif dtype in (set, PackedSet):
# pset = PackedSet(self)
# pset.update(set(iter_id2db(val) for val in item))
# return pset
# elif hasattr(item, '__iter__'):
# plist = PackedList(self)
# plist.extend(list(iter_id2db(val, plist) for val in item))
# plist.parent = parent
# return plist
# else:
# return item
typ, data = datatuple
# typ, data = datatuple
if typ == 'simple':
# single non-db objects
return data
elif typ == 'dbobj':
# a single stored dbobj
return id2db(data)
elif typ == 'iter':
# all types of iterables
return iter_id2db(data)
# if typ == 'simple':
# # single non-db objects
# return data
# elif typ == 'dbobj':
# # a single stored dbobj
# return id2db(data)
# elif typ == 'iter':
# # all types of iterables
# return iter_id2db(data)
def access(self, accessing_obj, access_type='read', default=False):
"""

282
src/utils/dbserialize.py Normal file
View file

@ -0,0 +1,282 @@
"""
This module handles serialization of arbitrary python structural data,
intended primarily to be stored in the database. It also supports
storing Django model instances (which plain pickle cannot do).
This serialization is used internally by the server, notably for
storing data in Attributes and for piping data to process pools.
The purpose of dbserialize is to handle all forms of data. For
well-structured non-arbitrary exchange, such as communicating with a
rich web client, a simpler JSON serialization makes more sense.
This module also implements the SaverList, SaverDict and SaverSet
classes. These are iterables that track their position in a nested
structure and make sure to send updates up to their root. This is
used by Attributes - without it, one would not be able to update mutables
in-situ; e.g. obj.db.mynestedlist[3][5] = 3 would never be saved and
would be out of sync with the database.
"""
from collections import defaultdict, MutableSequence, MutableSet, MutableMapping
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from src.utils.utils import to_str
# pickle protocol pinned explicitly: stored pickles must stay byte-stable
HIGHEST_PROTOCOL = 2

# initialization and helpers

# fast accessors bypassing any overloaded __getattribute__/__setattr__
_GA = object.__getattribute__
_SA = object.__setattr__
# model name -> ContentType natural key, and natural key -> model class
# NOTE(review): built once at import time; ContentTypes created later
# will not be found in these maps - confirm this is acceptable
_FROM_MODEL_MAP = defaultdict(str)
_FROM_MODEL_MAP.update(dict((c.model, c.natural_key()) for c in ContentType.objects.all()))
_TO_MODEL_MAP = defaultdict(str)
_TO_MODEL_MAP.update(dict((c.natural_key(), c.model_class()) for c in ContentType.objects.all()))
# return the typeclass wrapper of a model instance when it has one, else the instance
_TO_TYPECLASS = lambda o: hasattr(o, 'typeclass') and o.typeclass or o
# recognize the internal packed-dbobj format: ("__packed_dbobj__", key, date, id)
_IS_PACKED_DBOBJ = lambda o: type(o) == tuple and len(o) == 4 and o[0] == '__packed_dbobj__'
#
# SaverList, SaverDict, SaverSet - Attribute-specific helper classes and functions
#
def _save(method):
"method decorator that saves data to Attribute"
def save_wrapper(self, *args, **kwargs):
ret = method(self, *args, **kwargs)
self._save_tree()
return ret
return save_wrapper
class SaverMutable(object):
    """
    Parent class for properly handling of nested mutables in
    an Attribute. If not used something like
    obj.db.mylist[1][2] = "test" (allocation to a nested list)
    will not save the updated value to the database.
    """
    def __init__(self, *args, **kwargs):
        "store all properties for tracking the tree"
        # only the root saver holds the model instance (e.g. an Attribute);
        # children reach the root via their _parent chain
        self._db_obj = kwargs.pop("db_obj", None)
        self._parent = None
        # subclasses assign the concrete container (list/dict/set) here
        self._data = None
    def _save_tree(self):
        "recursively traverse back up the tree, save when we reach the root"
        if self._parent:
            self._parent._save_tree()
        else:
            try:
                # assigning to .value is what triggers the database write
                self._db_obj.value = self
            except AttributeError:
                raise AttributeError("SaverMutable %s lacks dobj at its root." % self)
    def _convert_mutables(self, item):
        "converts mutables to Saver* variants and assigns .parent property"
        dtype = type(item)
        if dtype in (basestring, int, long, float, bool, tuple):
            # immutables (tuples included) can be stored as-is
            return item
        elif dtype == list:
            item = SaverList(item)
            item._parent = self
        elif dtype == dict:
            item = SaverDict(item)
            item._parent = self
        elif dtype == set:
            item = SaverSet(item)
            item._parent = self
        return item
    # read-only protocol simply delegates to the wrapped container
    def __repr__(self):
        return self._data.__repr__()
    def __len__(self):
        return self._data.__len__()
    def __iter__(self):
        return self._data.__iter__()
    def __getitem__(self, key):
        return self._data.__getitem__(key)
    # mutating protocol triggers a save of the whole tree
    @_save
    def __setitem__(self, key, value):
        self._data.__setitem__(key, self._convert_mutables(value))
    @_save
    def __delitem__(self, key):
        self._data.__delitem__(key)
class SaverList(SaverMutable, MutableSequence):
    """A list whose every mutation is written back to its Attribute."""
    def __init__(self, *args, **kwargs):
        # SaverMutable consumes the db_obj keyword; the rest seeds the list
        super(SaverList, self).__init__(*args, **kwargs)
        self._data = list(*args)
    @_save
    def insert(self, index, value):
        item = self._convert_mutables(value)
        self._data.insert(index, item)
class SaverDict(SaverMutable, MutableMapping):
    """
    A dict that stores changes to an Attribute when updated
    """
    def __init__(self, *args, **kwargs):
        # SaverMutable consumes the db_obj keyword; remaining args seed the dict.
        # item assignment/deletion are inherited from SaverMutable and save.
        super(SaverDict, self).__init__(*args, **kwargs)
        self._data = dict(*args)
class SaverSet(SaverMutable, MutableSet):
    """A set that persists itself to its Attribute on every update."""
    def __init__(self, *args, **kwargs):
        # SaverMutable consumes the db_obj keyword; the rest seeds the set
        super(SaverSet, self).__init__(*args, **kwargs)
        self._data = set(*args)
    def __contains__(self, value):
        return value in self._data
    @_save
    def add(self, value):
        item = self._convert_mutables(value)
        self._data.add(item)
    @_save
    def discard(self, value):
        self._data.discard(value)
#
# serialization access functions
#
def _pack_dbobj(item):
"""
Check and convert django database objects to an internal representation.
This either returns the original input item or a tuple ("__packed_dbobj__", key, obj, id)
"""
obj = hasattr(item, 'dbobj') and item.dbobj or item
natural_key = _FROM_MODEL_MAP[hasattr(obj, "id") and hasattr("db_date_created") and
hasattr(obj, '__class__') and obj.__class__.__name__.lower()]
# build the internal representation as a tuple ("__packed_dbobj__", key, obj, id)
return natural_key and ('__packed_dbobj__', natural_key, _GA(obj, "db_date_created"), _GA(obj, id)) or item
def _unpack_dbobj(item):
"""
Check and convert internal representations back to Django database models.
The fact that item is a packed dbobj should be checked before this call.
This either returns the original input or converts the internal store back
to a database representation (its typeclass is returned if applicable).
"""
try:
obj = item[3] and _TO_TYPECLASS(_TO_MODEL_MAP[item[1]].objects.get(id=item[3]))
except ObjectDoesNotExist:
return None
# even if we got back a match, check the sanity of the date (some databases may 're-use' the id)
return obj and obj.db_data_created == item[3] and obj or None
def to_pickle(data):
    """
    This prepares data on arbitrary form to be pickled. It handles any nested
    structure and returns data on a form that is safe to pickle (including
    having converted any database models to their internal representation).
    We also convert any Saver*-type objects back to their normal
    representations; they are not pickle-safe.
    """
    def process_item(item):
        "Recursive processor and identification of data"
        dtype = type(item)
        if dtype in (basestring, int, long, float, bool):
            return item
        elif dtype == tuple:
            return tuple(process_item(val) for val in item)
        elif dtype in (list, SaverList):
            # BUGFIX: elements were copied verbatim ([key for key in item]),
            # so nested structures/db objects inside lists were never packed
            return [process_item(val) for val in item]
        elif dtype in (dict, SaverDict):
            return dict((key, process_item(val)) for key, val in item.items())
        elif dtype in (set, SaverSet):
            return set(process_item(val) for val in item)
        elif hasattr(item, '__iter__'):
            # BUGFIX: previously tested the non-existent '__item__' attribute.
            # We try to conserve the iterable class, if not convert to list.
            try:
                return item.__class__([process_item(val) for val in item])
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        # anything else is either a database model (packed) or returned as-is
        return _pack_dbobj(item)
    return process_item(data)
@transaction.autocommit
def from_pickle(data, db_obj=None):
    """
    This should be fed a just de-pickled data object. It will be converted back
    to a form that may contain database objects again. Note that if a database
    object was removed (or changed in-place) in the database, None will be returned.

    db_obj - this is the model instance (normally an Attribute) that Saver*-type
             iterables will save to when they update. It must have a 'value'
             property that saves assigned data to the database.

    If db_obj is given, lists, dicts and sets are converted to their
    SaverList, SaverDict and SaverSet counterparts, with parent links set up
    so in-place updates anywhere in the structure reach the root and save.
    """
    def process_item(item):
        "Recursive processor and identification of data"
        dtype = type(item)
        if dtype in (basestring, int, long, float, bool):
            return item
        elif _IS_PACKED_DBOBJ(item):
            # this must be checked before tuple
            return _unpack_dbobj(item)
        elif dtype == tuple:
            return tuple(process_item(val) for val in item)
        elif dtype == dict:
            return dict((key, process_item(val)) for key, val in item.items())
        elif dtype == set:
            return set(process_item(val) for val in item)
        elif hasattr(item, '__iter__'):
            try:
                # we try to conserve the iterable class if it accepts an iterator
                return item.__class__(process_item(val) for val in item)
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        return item

    def process_tree(item, parent):
        "Recursive conversion to Saver* types, linking each child to its parent"
        dtype = type(item)
        if dtype in (basestring, int, long, float, bool):
            return item
        elif _IS_PACKED_DBOBJ(item):
            # this must be checked before tuple
            return _unpack_dbobj(item)
        elif dtype == tuple:
            # tuples are immutable; their mutable members report to the
            # enclosing saver directly
            return tuple(process_tree(val, parent) for val in item)
        elif dtype == list:
            saver = SaverList()
            saver._parent = parent
            saver._data = [process_tree(val, saver) for val in item]
            return saver
        elif dtype == dict:
            saver = SaverDict()
            saver._parent = parent
            saver._data = dict((key, process_tree(val, saver))
                               for key, val in item.items())
            return saver
        elif dtype == set:
            saver = SaverSet()
            saver._parent = parent
            saver._data = set(process_tree(val, saver) for val in item)
            return saver
        elif hasattr(item, '__iter__'):
            try:
                # we try to conserve the iterable class if it accepts an iterator
                return item.__class__(process_tree(val, parent) for val in item)
            except (AttributeError, TypeError):
                saver = SaverList()
                saver._parent = parent
                saver._data = [process_tree(val, saver) for val in item]
                return saver
        return item

    if db_obj:
        # convert lists, dicts and sets to their Saver* counterparts. It is
        # only relevant if the "root" is an iterable of the right type.
        # BUGFIX: the root Saver* used to be fed back through the recursive
        # converter, which rebuilt it *without* db_obj - so updates on the
        # returned object could never be saved. Nested savers also got no
        # _parent links. We now build the root once (keeping db_obj) and
        # convert the children against it.
        dtype = type(data)
        if dtype == list:
            root = SaverList(db_obj=db_obj)
            root._data = [process_tree(val, root) for val in data]
            return root
        elif dtype == dict:
            root = SaverDict(db_obj=db_obj)
            root._data = dict((key, process_tree(val, root))
                              for key, val in data.items())
            return root
        elif dtype == set:
            root = SaverSet(db_obj=db_obj)
            root._data = set(process_tree(val, root) for val in data)
            return root
    return process_item(data)
def do_pickle(data):
    """Serialize data to a string using the module's pinned pickle protocol."""
    pickled = dumps(data, protocol=HIGHEST_PROTOCOL)
    return to_str(pickled)
def do_unpickle(data):
    """Recreate a python object from a string produced by do_pickle."""
    raw = to_str(data)
    return loads(raw)

234
src/utils/picklefield.py Normal file
View file

@ -0,0 +1,234 @@
#
# Copyright (c) 2009-2010 Gintautas Miliauskas
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Pickle field implementation for Django."""
from copy import deepcopy
from base64 import b64encode, b64decode
from zlib import compress, decompress
import six
import django
from django.db import models
# django 1.5 introduces force_text instead of force_unicode
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
# python 3.x does not have cPickle module
try:
from cPickle import loads, dumps # cpython 2.x
except ImportError:
from pickle import loads, dumps # cpython 3.x, other interpreters
try:
from django.utils import simplejson as json
except ImportError:
import json
DEFAULT_PROTOCOL = 2
#from picklefield import DEFAULT_PROTOCOL
#from picklefield.compat import force_text, loads, dumps
class PickledObject(str):
    """
    A subclass of string so it can be told whether a string is a pickled
    object or not (if the object is an instance of this class then it must
    [well, should] be a pickled one).

    Only really useful for passing pre-encoded values to ``default``
    with ``dbsafe_encode``, not that doing so is necessary. If you
    remove PickledObject and its references, you won't be able to pass
    in pre-encoded values anymore, but you can always just pass in the
    python objects themselves.
    """
    # no behavior of its own - the type itself is the marker
class _ObjectWrapper(object):
    """
    A class used to wrap object that have properties that may clash with the
    ORM internals.

    For example, objects with the `prepare_database_save` property such as
    `django.db.Model` subclasses won't work under certain conditions and the
    same apply for trying to retrieve any `callable` object.
    """
    # single slot keeps the wrapper lightweight and free of attribute clashes
    __slots__ = ('_obj',)
    def __init__(self, obj):
        # the wrapped payload; unwrapped again in PickledObjectField.to_python
        self._obj = obj
def wrap_conflictual_object(obj):
    """Wrap values that would confuse the ORM (models and callables); pass the rest through."""
    needs_wrap = callable(obj) or hasattr(obj, 'prepare_database_save')
    if needs_wrap:
        obj = _ObjectWrapper(obj)
    return obj
def dbsafe_encode(value, compress_object=False, pickle_protocol=DEFAULT_PROTOCOL):
    """
    Pickle ``value`` and base64-encode it, optionally zlib-compressing first.

    The value is deepcopy()'d before pickling: cPickle can emit different
    character streams for the same value depending on how it is referenced,
    and since lookups are done as simple string matches the streams must be
    canonical. See tests.py for more information.
    """
    pickled = dumps(deepcopy(value), protocol=pickle_protocol)
    if compress_object:
        pickled = compress(pickled)
    encoded = b64encode(pickled).decode()  # decode bytes to str
    return PickledObject(encoded)
def dbsafe_decode(value, compress_object=False):
    """Reverse of dbsafe_encode: base64-decode, optionally decompress, unpickle."""
    raw = b64decode(value.encode())  # encode str to bytes
    if compress_object:
        raw = decompress(raw)
    return loads(raw)
def _get_subfield_superclass():
    # hardcore trick to support django < 1.3 - there was something wrong with
    # inheritance and SubfieldBase before django 1.3
    # see https://github.com/django/django/commit/222c73261650201f5ce99e8dd4b1ce0d30a69eb4
    if django.VERSION < (1,3):
        return models.Field
    # on 1.3+, apply the SubfieldBase metaclass so to_python() is invoked
    # whenever field data is assigned/loaded
    return six.with_metaclass(models.SubfieldBase, models.Field)
class PickledObjectField(_get_subfield_superclass()):
    """
    A field that will accept *any* python object and store it in the
    database. PickledObjectField will optionally compress its values if
    declared with the keyword argument ``compress=True``.

    Does not actually encode and compress ``None`` objects (although you
    can still do lookups using None). This way, it is still possible to
    use the ``isnull`` lookup type correctly.
    """
    __metaclass__ = models.SubfieldBase # for django < 1.3

    def __init__(self, *args, **kwargs):
        # 'compress' and 'protocol' are our own kwargs - strip them before
        # passing the rest on to models.Field
        self.compress = kwargs.pop('compress', False)
        self.protocol = kwargs.pop('protocol', DEFAULT_PROTOCOL)
        # pickled blobs are not meaningfully editable in forms/admin
        kwargs.setdefault('editable', False)
        super(PickledObjectField, self).__init__(*args, **kwargs)

    def get_default(self):
        """
        Returns the default value for this field.

        The default implementation on models.Field calls force_unicode
        on the default, which means you can't set arbitrary Python
        objects as the default. To fix this, we just return the value
        without calling force_unicode on it. Note that if you set a
        callable as a default, the field will still call it. It will
        *not* try to pickle and encode it.
        """
        if self.has_default():
            if callable(self.default):
                return self.default()
            return self.default
        # If the field doesn't have a default, then we punt to models.Field.
        return super(PickledObjectField, self).get_default()

    def to_python(self, value):
        """
        B64decode and unpickle the object, optionally decompressing it.

        If an error is raised in de-pickling and we're sure the value is
        a definite pickle, the error is allowed to propagate. If we
        aren't sure if the value is a pickle or not, then we catch the
        error and return the original value instead.
        """
        if value is not None:
            # NOTE(review): bare except - consider narrowing to Exception
            try:
                value = dbsafe_decode(value, self.compress)
            except:
                # If the value is a definite pickle; and an error is raised in
                # de-pickling it should be allowed to propogate.
                if isinstance(value, PickledObject):
                    raise
            else:
                # undo the ORM-conflict wrapping done in pre_save
                if isinstance(value, _ObjectWrapper):
                    return value._obj
        return value

    def pre_save(self, model_instance, add):
        # wrap models/callables so the ORM does not mis-handle them
        value = super(PickledObjectField, self).pre_save(model_instance, add)
        return wrap_conflictual_object(value)

    def get_db_prep_value(self, value, connection=None, prepared=False):
        """
        Pickle and b64encode the object, optionally compressing it.

        The pickling protocol is specified explicitly (by default 2),
        rather than as -1 or HIGHEST_PROTOCOL, because we don't want the
        protocol to change over time. If it did, ``exact`` and ``in``
        lookups would likely fail, since pickle would now be generating
        a different string.
        """
        if value is not None and not isinstance(value, PickledObject):
            # We call force_text here explicitly, so that the encoded string
            # isn't rejected by the postgresql_psycopg2 backend. Alternatively,
            # we could have just registered PickledObject with the psycopg
            # marshaller (telling it to store it like it would a string), but
            # since both of these methods result in the same value being stored,
            # doing things this way is much easier.
            value = force_text(dbsafe_encode(value, self.compress, self.protocol))
        return value

    def value_to_string(self, obj):
        # serialize (e.g. for dumpdata) via the same encoded representation
        value = self._get_val_from_obj(obj)
        return self.get_db_prep_value(value)

    def get_internal_type(self):
        # stored as text in the database (base64 of the pickle)
        return 'TextField'

    def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
        if lookup_type not in ['exact', 'in', 'isnull']:
            raise TypeError('Lookup type %s is not supported.' % lookup_type)
        # The Field model already calls get_db_prep_value before doing the
        # actual lookup, so all we need to do is limit the lookup types.
        try:
            return super(PickledObjectField, self).get_db_prep_lookup(
                lookup_type, value, connection=connection, prepared=prepared)
        except TypeError:
            # Try not to break on older versions of Django, where the
            # `connection` and `prepared` parameters are not available.
            return super(PickledObjectField, self).get_db_prep_lookup(
                lookup_type, value)
# South support; see http://south.aeracode.org/docs/tutorial/part4.html#simple-inheritance
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], [r"^src\.utils\.picklefield\.PickledObjectField"])

View file

@ -478,6 +478,7 @@ def delay(delay=2, retval=None, callback=None):
reactor.callLater(delay, callb, retval)
return d
_FROM_MODEL_MAP = None
_TO_DBOBJ = lambda o: (hasattr(o, "dbobj") and o.dbobj) or o
_TO_PACKED_DBOBJ = lambda natural_key, dbref: ('__packed_dbobj__', natural_key, dbref)
@ -673,6 +674,8 @@ def run_async(to_execute, *args, **kwargs):
deferred.addCallback(callback, **callback_kwargs)
deferred.addErrback(errback, **errback_kwargs)
#
def check_evennia_dependencies():
"""
Checks the versions of Evennia's dependencies.