PEP8 cleanup of the entire codebase. Unchanged are many cases of too-long lines, partly because of the rewrite they would require but also because splitting many lines up would make the code harder to read. Also the third-party libraries (idmapper, prettytable etc) were not cleaned.

This commit is contained in:
Griatch 2013-11-14 19:31:17 +01:00
parent 30b7d2a405
commit 1ae17bcbe4
154 changed files with 5613 additions and 4054 deletions

View file

@ -1,18 +1,19 @@
#
# This sets up how models are displayed
# in the web admin interface.
#
from django.contrib import admin
from src.server.models import ServerConfig
class ServerConfigAdmin(admin.ModelAdmin):
"Custom admin for server configs"
list_display = ('db_key', 'db_value')
list_display_links = ('db_key',)
ordering = ['db_key', 'db_value']
search_fields = ['db_key']
save_as = True
save_on_top = True
list_select_related = True
admin.site.register(ServerConfig, ServerConfigAdmin)
#
# This sets up how models are displayed
# in the web admin interface.
#
from django.contrib import admin
from src.server.models import ServerConfig
class ServerConfigAdmin(admin.ModelAdmin):
"Custom admin for server configs"
list_display = ('db_key', 'db_value')
list_display_links = ('db_key',)
ordering = ['db_key', 'db_value']
search_fields = ['db_key']
save_as = True
save_on_top = True
list_select_related = True
admin.site.register(ServerConfig, ServerConfigAdmin)

View file

@ -1,17 +1,18 @@
"""
Contains the protocols, commands, and client factory needed for the Server and Portal
to communicate with each other, letting Portal work as a proxy. Both sides use this
same protocol.
Contains the protocols, commands, and client factory needed for the Server
and Portal to communicate with each other, letting Portal work as a proxy.
Both sides use this same protocol.
The separation works like this:
Portal - (AMP client) handles protocols. It contains a list of connected sessions in a
dictionary for identifying the respective player connected. If it looses the AMP connection
it will automatically try to reconnect.
Portal - (AMP client) handles protocols. It contains a list of connected
sessions in a dictionary for identifying the respective player
connected. If it loses the AMP connection it will automatically
try to reconnect.
Server - (AMP server) Handles all mud operations. The server holds its own list
of sessions tied to player objects. This is synced against the portal at startup
and when a session connects/disconnects
of sessions tied to player objects. This is synced against the portal
at startup and when a session connects/disconnects
"""
@ -29,16 +30,17 @@ from src.utils.utils import to_str, variable_from_module
# communication bits
PCONN = chr(1) # portal session connect
PDISCONN = chr(2) # portal session disconnect
PSYNC = chr(3) # portal session sync
SLOGIN = chr(4) # server session login
SDISCONN = chr(5) # server session disconnect
SDISCONNALL = chr(6) # server session disconnect all
SSHUTD = chr(7) # server shutdown
SSYNC = chr(8) # server sessigon sync
PCONN = chr(1) # portal session connect
PDISCONN = chr(2) # portal session disconnect
PSYNC = chr(3) # portal session sync
SLOGIN = chr(4) # server session login
SDISCONN = chr(5) # server session disconnect
SDISCONNALL = chr(6) # server session disconnect all
SSHUTD = chr(7) # server shutdown
SSYNC = chr(8)  # server session sync
MAXLEN = 65535 # max allowed data length in AMP protocol
MAXLEN = 65535 # max allowed data length in AMP protocol
def get_restart_mode(restart_file):
"""
@ -49,6 +51,7 @@ def get_restart_mode(restart_file):
return flag == "True"
return False
class AmpServerFactory(protocol.ServerFactory):
"""
This factory creates the Server as a new AMPProtocol instance for accepting
@ -71,9 +74,11 @@ class AmpServerFactory(protocol.ServerFactory):
self.server.amp_protocol.factory = self
return self.server.amp_protocol
class AmpClientFactory(protocol.ReconnectingClientFactory):
"""
This factory creates an instance of the Portal, an AMPProtocol instances to use to connect
This factory creates an instance of the Portal, an AMPProtocol
instances to use to connect
"""
# Initial reconnect delay in seconds.
initialDelay = 1
@ -139,6 +144,7 @@ class MsgPortal2Server(amp.Command):
errors = [(Exception, 'EXCEPTION')]
response = []
class MsgServer2Portal(amp.Command):
"""
Message server -> portal
@ -151,6 +157,7 @@ class MsgServer2Portal(amp.Command):
errors = [(Exception, 'EXCEPTION')]
response = []
class ServerAdmin(amp.Command):
"""
Portal -> Server
@ -165,6 +172,7 @@ class ServerAdmin(amp.Command):
errors = [(Exception, 'EXCEPTION')]
response = []
class PortalAdmin(amp.Command):
"""
Server -> Portal
@ -178,6 +186,7 @@ class PortalAdmin(amp.Command):
errors = [(Exception, 'EXCEPTION')]
response = []
class FunctionCall(amp.Command):
"""
Bidirectional
@ -202,6 +211,7 @@ loads = lambda data: pickle.loads(to_str(data))
MSGBUFFER = defaultdict(list)
#------------------------------------------------------------
# Core AMP protocol for communication Server <-> Portal
#------------------------------------------------------------
@ -241,7 +251,8 @@ class AMPProtocol(amp.AMP):
def errback(self, e, info):
"error handler, to avoid dropping connections on server tracebacks."
f = e.trap(Exception)
print "AMP Error for %(info)s: %(e)s" % {'info': info, 'e': e.getErrorMessage()}
print "AMP Error for %(info)s: %(e)s" % {'info': info,
'e': e.getErrorMessage()}
def send_split_msg(self, sessid, msg, data, command):
"""
@ -258,14 +269,16 @@ class AMPProtocol(amp.AMP):
datastr = dumps(data)
nmsg, ndata = len(msg), len(datastr)
if nmsg > MAXLEN or ndata > MAXLEN:
msglist = [msg[i:i+MAXLEN] for i in range(0, len(msg), MAXLEN)]
datalist = [datastr[i:i+MAXLEN] for i in range(0, len(datastr), MAXLEN)]
msglist = [msg[i:i + MAXLEN] for i in range(0, len(msg), MAXLEN)]
datalist = [datastr[i:i + MAXLEN]
for i in range(0, len(datastr), MAXLEN)]
nmsglist, ndatalist = len(msglist), len(datalist)
if ndatalist < nmsglist:
datalist.extend("" for i in range(nmsglist-ndatalist))
datalist.extend("" for i in range(nmsglist - ndatalist))
if nmsglist < ndatalist:
msglist.extend("" for i in range(ndatalist-nmsglist))
# we have split the msg/data into right-size chunks. Now we send it in sequence
msglist.extend("" for i in range(ndatalist - nmsglist))
# we have split the msg/data into right-size chunks. Now we
# send it in sequence
return [self.callRemote(command,
sessid=sessid,
msg=to_str(msg),
@ -295,8 +308,8 @@ class AMPProtocol(amp.AMP):
return {}
else:
# we have all parts. Put it all together in the right order.
msg = "".join(t[1] for t in sorted(MSGBUFFER[sessid], key=lambda o:o[0]))
data = "".join(t[2] for t in sorted(MSGBUFFER[sessid], key=lambda o:o[0]))
msg = "".join(t[1] for t in sorted(MSGBUFFER[sessid], key=lambda o: o[0]))
data = "".join(t[2] for t in sorted(MSGBUFFER[sessid], key=lambda o: o[0]))
del MSGBUFFER[sessid]
# call session hook with the data
self.factory.server.sessions.data_in(sessid, text=msg, **loads(data))
@ -311,13 +324,14 @@ class AMPProtocol(amp.AMP):
try:
return self.callRemote(MsgPortal2Server,
sessid=sessid,
msg=to_str(msg) if msg!=None else "",
msg=to_str(msg) if msg is not None else "",
ipart=0,
nparts=1,
data=dumps(data)).addErrback(self.errback, "MsgPortal2Server")
except amp.TooLong:
# the msg (or data) was too long for AMP to send. We need to send in blocks.
return self.send_split_msg(sessid, msg, kwargs, MsgPortal2Server)
# the msg (or data) was too long for AMP to send.
# We need to send in blocks.
return self.send_split_msg(sessid, msg, data, MsgPortal2Server)
# Server -> Portal message
@ -335,8 +349,8 @@ class AMPProtocol(amp.AMP):
return {}
else:
# we have all parts. Put it all together in the right order.
msg = "".join(t[1] for t in sorted(MSGBUFFER[sessid], key=lambda o:o[0]))
data = "".join(t[2] for t in sorted(MSGBUFFER[sessid], key=lambda o:o[0]))
msg = "".join(t[1] for t in sorted(MSGBUFFER[sessid], key=lambda o: o[0]))
data = "".join(t[2] for t in sorted(MSGBUFFER[sessid], key=lambda o: o[0]))
del MSGBUFFER[sessid]
# call session hook with the data
self.factory.portal.sessions.data_out(sessid, text=msg, **loads(data))
@ -351,14 +365,14 @@ class AMPProtocol(amp.AMP):
try:
return self.callRemote(MsgServer2Portal,
sessid=sessid,
msg=to_str(msg) if msg!=None else "",
msg=to_str(msg) if msg is not None else "",
ipart=0,
nparts=1,
data=dumps(data)).addErrback(self.errback, "MsgServer2Portal")
except amp.TooLong:
# the msg (or data) was too long for AMP to send. We need to send in blocks.
return self.send_split_msg(sessid, msg, kwargs, MsgServer2Portal)
# the msg (or data) was too long for AMP to send.
# We need to send in blocks.
return self.send_split_msg(sessid, msg, data, MsgServer2Portal)
# Server administration from the Portal side
def amp_server_admin(self, sessid, operation, data):
@ -372,17 +386,18 @@ class AMPProtocol(amp.AMP):
#print "serveradmin (server side):", sessid, ord(operation), data
if operation == PCONN: #portal_session_connect
if operation == PCONN: # portal_session_connect
# create a new session and sync it
server_sessionhandler.portal_connect(data)
elif operation == PDISCONN: #'portal_session_disconnect'
elif operation == PDISCONN: # portal_session_disconnect
# session closed from portal side
self.factory.server.sessions.portal_disconnect(sessid)
elif operation == PSYNC: #'portal_session_sync'
# force a resync of sessions when portal reconnects to server (e.g. after a server reboot)
# the data kwarg contains a dict {sessid: {arg1:val1,...}} representing the attributes
elif operation == PSYNC: # portal_session_sync
# force a resync of sessions when portal reconnects to server
# (e.g. after a server reboot) the data kwarg contains a dict
# {sessid: {arg1:val1,...}} representing the attributes
# to sync for each session.
server_sessionhandler.portal_session_sync(data)
else:
@ -414,23 +429,23 @@ class AMPProtocol(amp.AMP):
portal_sessionhandler = self.factory.portal.sessions
#print "portaladmin (portal side):", sessid, ord(operation), data
if operation == SLOGIN: # 'server_session_login'
if operation == SLOGIN: # server_session_login
# a session has authenticated; sync it.
portal_sessionhandler.server_logged_in(sessid, data)
elif operation == SDISCONN: #'server_session_disconnect'
elif operation == SDISCONN: # server_session_disconnect
# the server is ordering to disconnect the session
portal_sessionhandler.server_disconnect(sessid, reason=data)
elif operation == SDISCONNALL: #'server_session_disconnect_all'
elif operation == SDISCONNALL: # server_session_disconnect_all
# server orders all sessions to disconnect
portal_sessionhandler.server_disconnect_all(reason=data)
elif operation == SSHUTD: #server_shutdown'
elif operation == SSHUTD: # server_shutdown
# the server orders the portal to shut down
self.factory.portal.shutdown(restart=False)
elif operation == SSYNC: #'server_session_sync'
elif operation == SSYNC: # server_session_sync
# server wants to save session data to the portal, maybe because
# it's about to shut down.
portal_sessionhandler.server_session_sync(data)
@ -465,25 +480,28 @@ class AMPProtocol(amp.AMP):
result = variable_from_module(module, function)(*args, **kwargs)
if isinstance(result, Deferred):
# if result is a deferred, attach handler to properly wrap the return value
result.addCallback(lambda r: {"result":dumps(r)})
# if result is a deferred, attach handler to properly
# wrap the return value
result.addCallback(lambda r: {"result": dumps(r)})
return result
else:
return {'result':dumps(result)}
return {'result': dumps(result)}
FunctionCall.responder(amp_function_call)
def call_remote_FunctionCall(self, modulepath, functionname, *args, **kwargs):
"""
Access method called by either process. This will call an arbitrary function
on the other process (On Portal if calling from Server and vice versa).
Access method called by either process. This will call an arbitrary
function on the other process (On Portal if calling from Server and
vice versa).
Inputs:
modulepath (str) - python path to module holding function to call
functionname (str) - name of function in given module
*args, **kwargs will be used as arguments/keyword args for the remote function call
*args, **kwargs will be used as arguments/keyword args for the
remote function call
Returns:
A deferred that fires with the return value of the remote function call
A deferred that fires with the return value of the remote
function call
"""
return self.callRemote(FunctionCall,
module=modulepath,

View file

@ -4,10 +4,10 @@ Central caching module.
"""
from sys import getsizeof
import os, threading
import os
import threading
from collections import defaultdict
from django.core.cache import get_cache
from src.server.models import ServerConfig
from src.utils.utils import uses_database, to_str, get_evennia_pids
@ -35,6 +35,7 @@ if uses_database("mysql") and ServerConfig.objects.get_mysql_db_version() < '5.6
else:
_DATESTRING = "%Y:%m:%d-%H:%M:%S:%f"
def hashid(obj, suffix=""):
"""
Returns a per-class unique hash that combines the object's
@ -59,11 +60,15 @@ def hashid(obj, suffix=""):
# rely on memory adressing in this case.
date, idnum = "InMemory", id(obj)
if not idnum or not date:
# this will happen if setting properties on an object which is not yet saved
# this will happen if setting properties on an object which
# is not yet saved
return None
# we have to remove the class-name's space, for eventual use
# of memcached
hid = "%s-%s-#%s" % (_GA(obj, "__class__"), date, idnum)
hid = hid.replace(" ", "") # we have to remove the class-name's space, for memcached's sake
# we cache the object part of the hashid to avoid too many object lookups
hid = hid.replace(" ", "")
# we cache the object part of the hashid to avoid too many
# object lookups
_SA(obj, "_hashid", hid)
# build the complete hashid
hid = "%s%s" % (hid, suffix)
@ -84,8 +89,9 @@ def field_pre_save(sender, instance=None, update_fields=None, raw=False, **kwarg
"""
Called at the beginning of the field save operation. The save method
must be called with the update_fields keyword in order to be most efficient.
This method should NOT save; rather it is the save() that triggers this function.
Its main purpose is to allow to plug-in a save handler and oob handlers.
This method should NOT save; rather it is the save() that triggers this
function. Its main purpose is to allow to plug-in a save handler and oob
handlers.
"""
if raw:
return
@ -102,12 +108,14 @@ def field_pre_save(sender, instance=None, update_fields=None, raw=False, **kwarg
if callable(handler):
handler()
def field_post_save(sender, instance=None, update_fields=None, raw=False, **kwargs):
"""
Called at the beginning of the field save operation. The save method
must be called with the update_fields keyword in order to be most efficient.
This method should NOT save; rather it is the save() that triggers this function.
Its main purpose is to allow to plug-in a save handler and oob handlers.
This method should NOT save; rather it is the save() that triggers this
function. Its main purpose is to allow to plug-in a save handler and oob
handlers.
"""
if raw:
return
@ -127,70 +135,6 @@ def field_post_save(sender, instance=None, update_fields=None, raw=False, **kwar
if trackerhandler:
trackerhandler.update(fieldname, _GA(instance, fieldname))
#------------------------------------------------------------
# Attr cache - caching the attribute objects related to a given object to
# avoid lookups more than necessary (this makes Attributes on par in speed
# to any property).
#------------------------------------------------------------
## connected to m2m_changed signal in respective model class
#def post_attr_update(sender, **kwargs):
# "Called when the many2many relation changes (NOT when updating the value of an Attribute!)"
# obj = kwargs['instance']
# model = kwargs['model']
# action = kwargs['action']
# if kwargs['reverse']:
# # the reverse relation changed (the Attribute itself was acted on)
# pass
# else:
# # forward relation changed (the Object holding the Attribute m2m field)
# if not kwargs["pk_set"]:
# return
# if action == "post_add":
# # cache all added objects
# for attr_id in kwargs["pk_set"]:
# attr_obj = model.objects.get(pk=attr_id)
# set_attr_cache(obj, _GA(attr_obj, "db_key"), attr_obj)
# elif action == "post_remove":
# # obj.db_attributes.remove(attr) was called
# for attr_id in kwargs["pk_set"]:
# attr_obj = model.objects.get(pk=attr_id)
# del_attr_cache(obj, _GA(attr_obj, "db_key"))
# attr_obj.delete()
# elif action == "post_clear":
# # obj.db_attributes.clear() was called
# clear_obj_attr_cache(obj)
#
#
## attr cache - this is only left as deprecated cache
#
#def get_attr_cache(obj, attrname):
# "Called by getting attribute"
# hid = hashid(obj, "-%s" % attrname)
# return _ATTR_CACHE.get(hid, None)
#
#def set_attr_cache(obj, attrname, attrobj):
# "Set the attr cache manually; this can be used to update"
# global _ATTR_CACHE
# hid = hashid(obj, "-%s" % attrname)
# _ATTR_CACHE[hid] = attrobj
#
#def del_attr_cache(obj, attrname):
# "Del attribute cache"
# global _ATTR_CACHE
# hid = hashid(obj, "-%s" % attrname)
# if hid in _ATTR_CACHE:
# del _ATTR_CACHE[hid]
#
#def flush_attr_cache():
# "Clear attribute cache"
# global _ATTR_CACHE
# _ATTR_CACHE = {}
#
#def clear_obj_attr_cache(obj):
# global _ATTR_CACHE
# hid = hashid(obj)
# _ATTR_CACHE = {key:value for key, value in _ATTR_CACHE if not key.startswith(hid)}
#------------------------------------------------------------
# Property cache - this is a generic cache for properties stored on models.
@ -203,12 +147,14 @@ def get_prop_cache(obj, propname):
hid = hashid(obj, "-%s" % propname)
return _PROP_CACHE[hid].get(propname, None) if hid else None
def set_prop_cache(obj, propname, propvalue):
"Set property cache"
hid = hashid(obj, "-%s" % propname)
if hid:
_PROP_CACHE[hid][propname] = propvalue
def del_prop_cache(obj, propname):
"Delete element from property cache"
hid = hashid(obj, "-%s" % propname)
@ -216,11 +162,12 @@ def del_prop_cache(obj, propname):
if propname in _PROP_CACHE[hid]:
del _PROP_CACHE[hid][propname]
def flush_prop_cache():
"Clear property cache"
global _PROP_CACHE
_PROP_CACHE = defaultdict(dict)
#_PROP_CACHE.clear()
def get_cache_sizes():
"""
@ -229,8 +176,8 @@ def get_cache_sizes():
global _ATTR_CACHE, _PROP_CACHE
attr_n = len(_ATTR_CACHE)
attr_mb = sum(getsizeof(obj) for obj in _ATTR_CACHE) / 1024.0
field_n = 0 #sum(len(dic) for dic in _FIELD_CACHE.values())
field_mb = 0 # sum(sum([getsizeof(obj) for obj in dic.values()]) for dic in _FIELD_CACHE.values()) / 1024.0
field_n = 0 # sum(len(dic) for dic in _FIELD_CACHE.values())
field_mb = 0 # sum(sum([getsizeof(obj) for obj in dic.values()]) for dic in _FIELD_CACHE.values()) / 1024.0
prop_n = sum(len(dic) for dic in _PROP_CACHE.values())
prop_mb = sum(sum([getsizeof(obj) for obj in dic.values()]) for dic in _PROP_CACHE.values()) / 1024.0
return (attr_n, attr_mb), (field_n, field_mb), (prop_n, prop_mb)

View file

@ -7,16 +7,13 @@ Everything starts at handle_setup()
"""
import django
from django.core import management
from django.conf import settings
from django.contrib.auth import get_user_model
from src.server.models import ServerConfig
from src.help.models import HelpEntry
from src.utils import create
from django.utils.translation import ugettext as _
def create_config_values():
"""
Creates the initial config values.
@ -24,6 +21,7 @@ def create_config_values():
ServerConfig.objects.conf("site_name", settings.SERVERNAME)
ServerConfig.objects.conf("idle_timeout", settings.IDLE_TIMEOUT)
def get_god_player():
"""
Creates the god user.
@ -32,13 +30,15 @@ def get_god_player():
try:
god_player = PlayerDB.objects.get(id=1)
except PlayerDB.DoesNotExist:
txt = "\n\nNo superuser exists yet. The superuser is the 'owner' account on the"
txt += "\nEvennia server. Create a new superuser using the command"
txt = "\n\nNo superuser exists yet. The superuser is the 'owner'"
txt += "\naccount on the Evennia server. Create a new superuser using"
txt += "\nthe command"
txt += "\n\n python manage.py createsuperuser"
txt += "\n\nFollow the prompts, then restart the server."
raise Exception(txt)
return god_player
def create_objects():
"""
Creates the #1 player and Limbo room.
@ -54,18 +54,23 @@ def create_objects():
# mud-specific settings for the PlayerDB object.
player_typeclass = settings.BASE_PLAYER_TYPECLASS
# run all creation hooks on god_player (we must do so manually since the manage.py command does not)
# run all creation hooks on god_player (we must do so manually
# since the manage.py command does not)
god_player.typeclass_path = player_typeclass
god_player.basetype_setup()
god_player.at_player_creation()
god_player.locks.add("examine:perm(Immortals);edit:false();delete:false();boot:false();msg:all()")
god_player.permissions.add("Immortals") # this is necessary for quelling to work correctly.
# this is necessary for quelling to work correctly.
god_player.permissions.add("Immortals")
# Limbo is the default "nowhere" starting room
# Create the in-game god-character for player #1 and set it to exist in Limbo.
# Create the in-game god-character for player #1 and set
# it to exist in Limbo.
character_typeclass = settings.BASE_CHARACTER_TYPECLASS
god_character = create.create_object(character_typeclass, key=god_player.username, nohome=True)
god_character = create.create_object(character_typeclass,
key=god_player.username, nohome=True)
print "god_character:", character_typeclass, god_character, god_character.cmdset.all()
god_character.id = 1
god_character.db.desc = _('This is User #1.')
@ -81,10 +86,13 @@ def create_objects():
limbo_obj = create.create_object(room_typeclass, _('Limbo'), nohome=True)
limbo_obj.id = 2
string = " ".join([
"Welcome to your new {wEvennia{n-based game. From here you are ready to begin development.",
"Visit http://evennia.com if you should need help or would like to participate in community discussions.",
"If you are logged in as User #1 you can create a demo/tutorial area with '@batchcommand contrib.tutorial_world.build'.",
"Log out and create a new non-admin account at the login screen to play the tutorial properly."])
"Welcome to your new {wEvennia{n-based game. From here you are ready",
"to begin development. Visit http://evennia.com if you should need",
"help or would like to participate in community discussions. If you",
"are logged in as User #1 you can create a demo/tutorial area with",
"'@batchcommand contrib.tutorial_world.build'. Log out and create",
"a new non-admin account at the login screen to play the tutorial",
"properly."])
string = _(string)
limbo_obj.db.desc = string
limbo_obj.save()
@ -96,6 +104,7 @@ def create_objects():
if not god_character.home:
god_character.home = limbo_obj
def create_channels():
"""
Creates some sensible default channels.
@ -112,20 +121,21 @@ def create_channels():
key3, aliases, desc, locks = settings.CHANNEL_CONNECTINFO
cchan = create.create_channel(key3, aliases, desc, locks=locks)
# TODO: postgresql-psycopg2 has a strange error when trying to connect the user
# to the default channels. It works fine from inside the game, but not from
# the initial startup. We are temporarily bypassing the problem with the following
# fix. See Evennia Issue 151.
if ((".".join(str(i) for i in django.VERSION) < "1.2" and settings.DATABASE_ENGINE == "postgresql_psycopg2")
# TODO: postgresql-psycopg2 has a strange error when trying to
# connect the user to the default channels. It works fine from inside
# the game, but not from the initial startup. We are temporarily bypassing
# the problem with the following fix. See Evennia Issue 151.
if ((".".join(str(i) for i in django.VERSION) < "1.2"
and settings.DATABASE_ENGINE == "postgresql_psycopg2")
or (hasattr(settings, 'DATABASES')
and settings.DATABASES.get("default", {}).get('ENGINE', None)
== 'django.db.backends.postgresql_psycopg2')):
warning = """
PostgreSQL-psycopg2 compatibility fix:
The in-game channels %s, %s and %s were created,
but the superuser was not yet connected to them. Please use in-game commands to
connect Player #1 to those channels when first logging in.
but the superuser was not yet connected to them. Please use in-game
commands to connect Player #1 to those channels when first
logging in.
""" % (key1, key2, key3)
print warning
return
@ -137,6 +147,7 @@ def create_channels():
PlayerChannelConnection.objects.create_connection(goduser, ichan)
PlayerChannelConnection.objects.create_connection(goduser, cchan)
def create_system_scripts():
"""
Setup the system repeat scripts. They are automatically started
@ -152,11 +163,10 @@ def create_system_scripts():
script2 = create.create_script(scripts.ValidateScripts)
# update the channel handler to make sure it's in sync
script3 = create.create_script(scripts.ValidateChannelHandler)
# clear the attribute cache regularly
#script4 = create.create_script(scripts.ClearAttributeCache)
if not script1 or not script2 or not script3:# or not script4:
if not script1 or not script2 or not script3:
print " Error creating system scripts."
def start_game_time():
"""
This starts a persistent script that keeps track of the
@ -168,6 +178,7 @@ def start_game_time():
from src.utils import gametime
gametime.init_gametime()
def create_admin_media_links():
"""
This traverses to src/web/media and tries to create a symbolic
@ -179,7 +190,8 @@ def create_admin_media_links():
since the django install may be at different locations depending
on system.
"""
import django, os
import django
import os
if django.get_version() < 1.4:
dpath = os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
@ -202,6 +214,7 @@ def create_admin_media_links():
else:
print " Admin-media files should be copied manually to ADMIN_MEDIA_ROOT."
def at_initial_setup():
"""
Custom hook for users to overload some or all parts of the initial
@ -220,6 +233,7 @@ def at_initial_setup():
if mod.__dict__.get("at_initial_setup", None):
mod.at_initial_setup()
def reset_server():
"""
We end the initialization by resetting the server. This
@ -231,6 +245,7 @@ def reset_server():
print " Initial setup complete. Restarting Server once."
SESSIONS.server.shutdown(mode='reset')
def handle_setup(last_step):
"""
Main logic for the module. It allows for restarting
@ -242,7 +257,7 @@ def handle_setup(last_step):
# this means we don't need to handle setup since
# it already ran successfully once.
return
elif last_step == None:
elif last_step is None:
# config doesn't exist yet. First start of server
last_step = 0

View file

@ -3,6 +3,7 @@ Custom manager for ServerConfig objects.
"""
from django.db import models
class ServerConfigManager(models.Manager):
"""
This ServerConfigManager implements methods for searching
@ -24,16 +25,16 @@ class ServerConfigManager(models.Manager):
"""
if not key:
return self.all()
elif delete == True:
elif delete is True:
for conf in self.filter(db_key=key):
conf.delete()
elif value != None:
elif value is not None:
conf = self.filter(db_key=key)
if conf:
conf = conf[0]
else:
conf = self.model(db_key=key)
conf.value = value # this will pickle
conf.value = value # this will pickle
else:
conf = self.filter(db_key=key)
if not conf:
@ -42,7 +43,8 @@ class ServerConfigManager(models.Manager):
def get_mysql_db_version(self):
"""
This is a helper method for getting the version string of a mysql database.
This is a helper method for getting the version string
of a mysql database.
"""
from django.db import connection
conn = connection.cursor()

View file

@ -18,6 +18,7 @@ from src.utils.idmapper.models import SharedMemoryModel
from src.utils import logger, utils
from src.server.manager import ServerConfigManager
#------------------------------------------------------------
#
# ServerConfig
@ -61,11 +62,13 @@ class ServerConfig(SharedMemoryModel):
def __key_get(self):
"Getter. Allows for value = self.key"
return self.db_key
#@key.setter
def __key_set(self, value):
"Setter. Allows for self.key = value"
self.db_key = value
self.save()
#@key.deleter
def __key_del(self):
"Deleter. Allows for del self.key. Deletes entry."
@ -77,6 +80,7 @@ class ServerConfig(SharedMemoryModel):
def __value_get(self):
"Getter. Allows for value = self.value"
return pickle.loads(str(self.db_value))
#@value.setter
def __value_set(self, value):
"Setter. Allows for self.value = value"
@ -86,6 +90,7 @@ class ServerConfig(SharedMemoryModel):
return
self.db_value = pickle.dumps(value)
self.save()
#@value.deleter
def __value_del(self):
"Deleter. Allows for del self.value. Deletes entry."

View file

@ -8,13 +8,14 @@ from django.conf import settings
from src.utils.utils import to_str
_GA = object.__getattribute__
_SA = object.__setattr__
_NA = lambda o: (None, "N/A") # not implemented
_NA = lambda o: (None, "N/A") # not implemented
# mapper for which properties may be requested/sent to the client and how to do so.
# Each entry should define a function that returns two values - the name of the
# propertye being returned (a string) and the value. If tracking database fields,
# make sure to enter the full database field name (e.g. db_key rather than just key)
# since the db_ prefix is used by trackers to know which tracking mechanism to activate.
# mapper for which properties may be requested/sent to the client and how
# to do so. Each entry should define a function that returns two values - the
# name of the property being returned (a string) and the value. If tracking
# database fields, make sure to enter the full database field name (e.g.
# db_key rather than just key) since the db_ prefix is used by trackers
# to know which tracking mechanism to activate.
OOB_SENDABLE = {
## General
@ -97,13 +98,16 @@ class TrackerBase(object):
"""
def __init__(self, oobhandler, *args, **kwargs):
self.oobhandler = oobhandler
def update(self, *args, **kwargs):
"Called by tracked objects"
pass
def at_remove(self, *args, **kwargs):
"Called when tracker is removed"
pass
class OOBFieldTracker(TrackerBase):
"""
Tracker that passively sends data to a stored sessid whenever
@ -127,7 +131,9 @@ class OOBFieldTracker(TrackerBase):
new_value = new_value.key
except AttributeError:
new_value = to_str(new_value, force_string=True)
self.oobhandler.msg(self.sessid, "report", self.fieldname, new_value, *args, **kwargs)
self.oobhandler.msg(self.sessid, "report", self.fieldname,
new_value, *args, **kwargs)
class OOBAttributeTracker(TrackerBase):
"""
@ -136,13 +142,13 @@ class OOBAttributeTracker(TrackerBase):
we instead store the name of the attribute to return.
"""
def __init__(self, oobhandler, fieldname, sessid, attrname, *args, **kwargs):
"""
attrname - name of attribute to track
sessid - sessid of session to report to
"""
self.oobhandler = oobhandler
self.attrname = attrname
self.sessid = sessid
"""
attrname - name of attribute to track
sessid - sessid of session to report to
"""
self.oobhandler = oobhandler
self.attrname = attrname
self.sessid = sessid
def update(self, new_value, *args, **kwargs):
"Called by cache when attribute's db_value field updates"
@ -152,6 +158,7 @@ class OOBAttributeTracker(TrackerBase):
new_value = to_str(new_value, force_string=True)
self.oobhandler.msg(self.sessid, "report", self.attrname, new_value, *args, **kwargs)
#------------------------------------------------------------
# OOB commands
# This defines which internal server commands the OOB handler
@ -173,31 +180,51 @@ def oob_error(oobhandler, session, errmsg, *args, **kwargs):
occurs already at the execution stage (such as the oob function
not being recognized or having the wrong args etc).
"""
session.msg(oob=("send", {"ERROR":errmsg}))
session.msg(oob=("send", {"ERROR": errmsg}))
def LIST(oobhandler, session, mode, *args, **kwargs):
"""
List available properties. Mode is the type of information
desired:
"COMMANDS" Request an array of commands supported by the server.
"LISTS" Request an array of lists supported by the server.
"CONFIGURABLE_VARIABLES" Request an array of variables the client can configure.
"REPORTABLE_VARIABLES" Request an array of variables the server will report.
"REPORTED_VARIABLES" Request an array of variables currently being reported.
"SENDABLE_VARIABLES" Request an array of variables the server will send.
"COMMANDS" Request an array of commands supported
by the server.
"LISTS" Request an array of lists supported
by the server.
"CONFIGURABLE_VARIABLES" Request an array of variables the client
can configure.
"REPORTABLE_VARIABLES" Request an array of variables the server
will report.
"REPORTED_VARIABLES" Request an array of variables currently
being reported.
"SENDABLE_VARIABLES" Request an array of variables the server
will send.
"""
mode = mode.upper()
# the first return argument is treated by the msdp protocol as the name of the msdp array to return
# the first return argument is treated by the msdp protocol as the
# name of the msdp array to return
if mode == "COMMANDS":
session.msg(oob=("list", ("COMMANDS", "LIST", "REPORT", "UNREPORT", "SEND"))) # RESET
session.msg(oob=("list", ("COMMANDS",
"LIST",
"REPORT",
"UNREPORT",
# "RESET",
"SEND")))
elif mode == "LISTS":
session.msg(oob=("list", ("LISTS", "REPORTABLE_VARIABLES","REPORTED_VARIABLES", "SENDABLE_VARIABLES"))) #CONFIGURABLE_VARIABLES
session.msg(oob=("list", ("LISTS",
"REPORTABLE_VARIABLES",
"REPORTED_VARIABLES",
# "CONFIGURABLE_VARIABLES",
"SENDABLE_VARIABLES")))
elif mode == "REPORTABLE_VARIABLES":
session.msg(oob=("list", ("REPORTABLE_VARIABLES",) + tuple(key for key in OOB_REPORTABLE.keys())))
session.msg(oob=("list", ("REPORTABLE_VARIABLES",) +
tuple(key for key in OOB_REPORTABLE.keys())))
elif mode == "REPORTED_VARIABLES":
session.msg(oob=("list", ("REPORTED_VARIABLES",) + tuple(oobhandler.get_all_tracked(session))))
session.msg(oob=("list", ("REPORTED_VARIABLES",) +
tuple(oobhandler.get_all_tracked(session))))
elif mode == "SENDABLE_VARIABLES":
session.msg(oob=("list", ("SENDABLE_VARIABLES",) + tuple(key for key in OOB_REPORTABLE.keys())))
session.msg(oob=("list", ("SENDABLE_VARIABLES",) +
tuple(key for key in OOB_REPORTABLE.keys())))
#elif mode == "CONFIGURABLE_VARIABLES":
# pass
else:
@ -221,6 +248,7 @@ def send(oobhandler, session, *args, **kwargs):
# return result
session.msg(oob=("send", ret))
def report(oobhandler, session, *args, **kwargs):
"""
This creates a tracker instance to track the data given in *args.
@ -232,9 +260,12 @@ def report(oobhandler, session, *args, **kwargs):
key, val = OOB_REPORTABLE.get(name, _NA)(obj)
if key:
if key.startswith("db_"):
oobhandler.track_field(obj, session.sessid, key, OOBFieldTracker)
else: # assume attribute
oobhandler.track_attribute(obj, session.sessid, key, OOBAttributeTracker)
oobhandler.track_field(obj, session.sessid,
key, OOBFieldTracker)
else: # assume attribute
oobhandler.track_attribute(obj, session.sessid,
key, OOBAttributeTracker)
def unreport(oobhandler, session, vartype="prop", *args, **kwargs):
"""
@ -248,6 +279,6 @@ def unreport(oobhandler, session, vartype="prop", *args, **kwargs):
if key:
if key.startswith("db_"):
oobhandler.untrack_field(obj, session.sessid, key)
else: # assume attribute
else: # assume attribute
oobhandler.untrack_attribute(obj, session.sessid, key)

View file

@ -5,14 +5,16 @@ The OOBHandler is called directly by out-of-band protocols. It supplies three
pieces of functionality:
function execution - the oob protocol can execute a function directly on
the server. Only functions specified in settings.OOB_PLUGIN_MODULE.OOB_FUNCS
are valid for this use.
repeat func execution - the oob protocol can request a given function be executed repeatedly
at a regular interval.
tracking - the oob protocol can request Evennia to track changes to fields on
objects, as well as changes in Attributes. This is done by dynamically adding
tracker-objects on entities. The behaviour of those objects can be customized
via settings.OOB_PLUGIN_MODULE
the server. Only functions specified in
settings.OOB_PLUGIN_MODULE.OOB_FUNCS are valid
for this use.
repeat func execution - the oob protocol can request a given function be
executed repeatedly at a regular interval.
tracking - the oob protocol can request Evennia to track changes to
fields on objects, as well as changes in Attributes. This is
done by dynamically adding tracker-objects on entities. The
behaviour of those objects can be customized via
settings.OOB_PLUGIN_MODULE
oob functions have the following call signature:
function(caller, *args, **kwargs)
@ -30,7 +32,7 @@ from src.scripts.scripts import Script
from src.utils.create import create_script
from src.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj
from src.utils import logger
from src.utils.utils import all_from_module, to_str, is_iter, make_iter
from src.utils.utils import all_from_module
_SA = object.__setattr__
_GA = object.__getattribute__
@ -50,14 +52,18 @@ class TrackerHandler(object):
"""
def __init__(self, obj):
"""
This is initiated and stored on the object as a property _trackerhandler.
This is initiated and stored on the object as a
property _trackerhandler.
"""
try: obj = obj.dbobj
except AttributeError: pass
try:
obj = obj.dbobj
except AttributeError:
pass
self.obj = obj
self.ntrackers = 0
# initiate store only with valid on-object fieldnames
self.tracktargets = dict((key, {}) for key in _GA(_GA(self.obj, "_meta"), "get_all_field_names")())
self.tracktargets = dict((key, {})
for key in _GA(_GA(self.obj, "_meta"), "get_all_field_names")())
def add(self, fieldname, tracker):
"""
@ -95,19 +101,23 @@ class TrackerHandler(object):
except Exception:
logger.log_trace()
class TrackerBase(object):
    """
    Parent class for all OOB tracker objects.

    Subclasses are expected to override update() to react to changes
    on the tracked entity, and at_remove() to clean up when the
    tracker is detached. The base implementations are deliberate
    no-ops.
    """
    def __init__(self, *args, **kwargs):
        # the base tracker holds no state of its own
        pass

    def update(self, *args, **kwargs):
        """Hook called by the tracked object when its value changes."""
        pass

    def at_remove(self, *args, **kwargs):
        """Hook called just before this tracker is removed."""
        pass
class _RepeaterScript(Script):
"""
Repeating and subscription-enabled script for triggering OOB
@ -117,7 +127,7 @@ class _RepeaterScript(Script):
"Called when script is initialized"
self.key = "oob_func"
self.desc = "OOB functionality script"
self.persistent = False #oob scripts should always be non-persistent
self.persistent = False # oob scripts should always be non-persistent
self.ndb.subscriptions = {}
def at_repeat(self):
@ -142,11 +152,12 @@ class _RepeaterScript(Script):
"""
self.ndb.subscriptions.pop(store_key, None)
class _RepeaterPool(object):
"""
This maintains a pool of _RepeaterScript scripts, ordered one per interval. It
will automatically cull itself once a given interval's script has no more
subscriptions.
This maintains a pool of _RepeaterScript scripts, ordered one per
interval. It will automatically cull itself once a given interval's
script has no more subscriptions.
This is used and accessed from oobhandler.repeat/unrepeat
"""
@ -160,9 +171,11 @@ class _RepeaterPool(object):
"""
if interval not in self.scripts:
# if no existing interval exists, create new script to fill the gap
new_tracker = create_script(_RepeaterScript, key="oob_repeater_%is" % interval, interval=interval)
new_tracker = create_script(_RepeaterScript,
key="oob_repeater_%is" % interval, interval=interval)
self.scripts[interval] = new_tracker
self.scripts[interval].subscribe(store_key, sessid, func_key, interval, *args, **kwargs)
self.scripts[interval].subscribe(store_key, sessid, func_key,
interval, *args, **kwargs)
def remove(self, store_key, interval):
"""
@ -176,8 +189,8 @@ class _RepeaterPool(object):
def stop(self):
"""
Stop all scripts in pool. This is done at server reload since restoring the pool
will automatically re-populate the pool.
Stop all scripts in pool. This is done at server reload since
restoring the pool will automatically re-populate the pool.
"""
for script in self.scripts.values():
script.stop()
@ -188,8 +201,8 @@ class _RepeaterPool(object):
class OOBHandler(object):
"""
The OOBHandler maintains all dynamic on-object oob hooks. It will store the
creation instructions and re-apply them at a server reload (but not after
a server shutdown)
creation instructions and re-apply them at a server reload (but
not after a server shutdown)
"""
def __init__(self):
"""
@ -207,10 +220,12 @@ class OOBHandler(object):
"""
if self.oob_tracker_storage:
#print "saved tracker_storage:", self.oob_tracker_storage
ServerConfig.objects.conf(key="oob_tracker_storage", value=dbserialize(self.oob_tracker_storage))
ServerConfig.objects.conf(key="oob_tracker_storage",
value=dbserialize(self.oob_tracker_storage))
if self.oob_repeat_storage:
#print "saved repeat_storage:", self.oob_repeat_storage
ServerConfig.objects.conf(key="oob_repeat_storage", value=dbserialize(self.oob_repeat_storage))
ServerConfig.objects.conf(key="oob_repeat_storage",
value=dbserialize(self.oob_repeat_storage))
self.oob_tracker_pool.stop()
def restore(self):
@ -242,12 +257,14 @@ class OOBHandler(object):
Create an OOB obj of class _oob_MAPPING[tracker_key] on obj. args,
kwargs will be used to initialize the OOB hook before adding
it to obj.
If property_key is not given, but the OOB has a class property property_name, this
will be used as the property name when assigning the OOB to
obj, otherwise tracker_key is used as the property name.
If property_key is not given, but the OOB has a class property
property_name, this will be used as the property name when assigning
the OOB to obj, otherwise tracker_key is used as the property name.
"""
try: obj = obj.dbobj
except AttributeError: pass
try:
obj = obj.dbobj
except AttributeError:
pass
if not "_trackerhandler" in _GA(obj, "__dict__"):
# assign trackerhandler to object
@ -266,8 +283,10 @@ class OOBHandler(object):
Remove the OOB from obj. If oob implements an
at_delete hook, this will be called with args, kwargs
"""
try: obj = obj.dbobj
except AttributeError: pass
try:
obj = obj.dbobj
except AttributeError:
pass
try:
# call at_delete hook
@ -278,7 +297,7 @@ class OOBHandler(object):
store_key = (pack_dbobj(obj), sessid, fieldname)
self.oob_tracker_storage.pop(store_key, None)
def get_all_tracked(session):
def get_all_tracked(self, session):
"""
Get the names of all variables this session is tracking.
"""
@ -304,12 +323,14 @@ class OOBHandler(object):
def track_attribute(self, obj, sessid, attr_name, trackerclass):
"""
Shortcut wrapper method for specifically tracking the changes of an
Attribute on an object. Will create a tracker on the Attribute Object and
name in a way the Attribute expects.
Attribute on an object. Will create a tracker on the Attribute
Object and name in a way the Attribute expects.
"""
# get the attribute object if we can
try: obj = obj.dbobj
except AttributeError: pass
try:
obj = obj.dbobj
except AttributeError:
pass
attrobj = _GA(obj, "attributes").get(attr_name, return_obj=True)
if attrobj:
self.track(attrobj, sessid, "db_value", trackerclass, attr_name)
@ -318,8 +339,10 @@ class OOBHandler(object):
"""
Shortcut for deactivating tracking for a given attribute.
"""
try: obj = obj.dbobj
except AttributeError: pass
try:
obj = obj.dbobj
except AttributeError:
pass
attrobj = _GA(obj, "attributes").get(attr_name, return_obj=True)
if attrobj:
self.untrack(attrobj, sessid, attr_name, trackerclass)
@ -335,7 +358,7 @@ class OOBHandler(object):
try:
obj = obj.dbobj
except AttributeError:
passj
pass
store_obj = pack_dbobj(obj)
store_key = (store_obj, sessid, func_key, interval)
# prepare to store
@ -363,7 +386,6 @@ class OOBHandler(object):
# access method - called from session.msg()
def execute_cmd(self, session, func_key, *args, **kwargs):
"""
Retrieve oobfunc from OOB_FUNCS and execute it immediately
@ -371,7 +393,7 @@ class OOBHandler(object):
"""
try:
#print "OOB execute_cmd:", session, func_key, args, kwargs, _OOB_FUNCS.keys()
oobfunc = _OOB_FUNCS[func_key] # raise traceback if not found
oobfunc = _OOB_FUNCS[func_key] # raise traceback if not found
oobfunc(self, session, *args, **kwargs)
except KeyError,e:
errmsg = "OOB Error: function '%s' not recognized: %s" % (func_key, e)

View file

@ -0,0 +1 @@
# -*- coding: utf-8 -*-

View file

@ -20,12 +20,14 @@ import zlib
MCCP = chr(86)
FLUSH = zlib.Z_SYNC_FLUSH
def mccp_compress(protocol, data):
    """
    Compress outgoing data when MCCP has been negotiated.

    If the protocol carries a `zlib` compression object (set during
    MCCP negotiation), `data` is run through it and the stream is
    sync-flushed so the client can decompress immediately; otherwise
    `data` is returned untouched.
    """
    compressor = getattr(protocol, 'zlib', None)
    if compressor is None:
        return data
    return compressor.compress(data) + compressor.flush(zlib.Z_SYNC_FLUSH)
return data
class Mccp(object):
"""
Implements the MCCP protocol. Add this to a

View file

@ -9,9 +9,7 @@ etc.
"""
import re
from django.conf import settings
from src.utils.utils import make_iter, mod_import, to_str
from src.utils import logger
from src.utils.utils import to_str
# MSDP-relevant telnet cmd/opt-codes
MSDP = chr(69)
@ -29,11 +27,18 @@ SE = chr(240)
force_str = lambda inp: to_str(inp, force_string=True)
# pre-compiled regexes
regex_array = re.compile(r"%s(.*?)%s%s(.*?)%s" % (MSDP_VAR, MSDP_VAL, MSDP_ARRAY_OPEN, MSDP_ARRAY_CLOSE)) # return 2-tuple
regex_table = re.compile(r"%s(.*?)%s%s(.*?)%s" % (MSDP_VAR, MSDP_VAL, MSDP_TABLE_OPEN, MSDP_TABLE_CLOSE)) # return 2-tuple (may be nested)
# returns 2-tuple
regex_array = re.compile(r"%s(.*?)%s%s(.*?)%s" % (MSDP_VAR, MSDP_VAL,
MSDP_ARRAY_OPEN,
MSDP_ARRAY_CLOSE))
# returns 2-tuple (may be nested)
regex_table = re.compile(r"%s(.*?)%s%s(.*?)%s" % (MSDP_VAR, MSDP_VAL,
MSDP_TABLE_OPEN,
MSDP_TABLE_CLOSE))
regex_var = re.compile(MSDP_VAR)
regex_val = re.compile(MSDP_VAL)
# Msdp object handler
class Msdp(object):
@ -90,7 +95,7 @@ class Msdp(object):
else:
string += MSDP_VAR + force_str(key) + MSDP_VAL + force_str(val)
string += MSDP_TABLE_CLOSE
return stringk
return string
def make_array(name, *args):
"build a array. Arrays may not nest tables by definition."
@ -169,7 +174,7 @@ class Msdp(object):
tables[key] = {}
for varval in regex_var.split(table):
parts = regex_val.split(varval)
tables[key].expand({parts[0] : tuple(parts[1:]) if len(parts)>1 else ("",)})
tables[key].expand({parts[0]: tuple(parts[1:]) if len(parts) > 1 else ("",)})
for key, array in regex_array.findall(data):
arrays[key] = []
for val in regex_val.split(array):
@ -178,16 +183,18 @@ class Msdp(object):
for varval in regex_var.split(regex_array.sub("", regex_table.sub("", data))):
# get remaining varvals after cleaning away tables/arrays
parts = regex_val.split(varval)
variables[parts[0].upper()] = tuple(parts[1:]) if len(parts)>1 else ("", )
variables[parts[0].upper()] = tuple(parts[1:]) if len(parts) > 1 else ("", )
#print "MSDP: table, array, variables:", tables, arrays, variables
# all variables sent through msdp to Evennia are considered commands with arguments.
# there are three forms of commands possible through msdp:
# all variables sent through msdp to Evennia are considered commands
# with arguments. There are three forms of commands possible
# through msdp:
#
# VARNAME VAR -> varname(var)
# ARRAYNAME VAR VAL VAR VAL VAR VAL ENDARRAY -> arrayname(val,val,val)
# TABLENAME TABLE VARNAME VAL VARNAME VAL ENDTABLE -> tablename(varname=val, varname=val)
# TABLENAME TABLE VARNAME VAL VARNAME VAL ENDTABLE ->
# tablename(varname=val, varname=val)
#
# default MSDP functions
@ -232,82 +239,4 @@ class Msdp(object):
Send oob data to Evennia
"""
#print "msdp data_in:", funcname, args, kwargs
self.protocol.data_in(text=None, oob=(funcname, args, kwargs))
# # MSDP Commands
# # Some given MSDP (varname, value) pairs can also be treated as command + argument.
# # Generic msdp command map. The argument will be sent to the given command.
# # See http://tintin.sourceforge.net/msdp/ for definitions of each command.
# # These are client->server commands.
# def msdp_cmd_list(self, arg):
# """
# The List command allows for retrieving various info about the server/client
# """
# if arg == 'COMMANDS':
# return self.evennia_to_msdp(arg, MSDP_COMMANDS)
# elif arg == 'LISTS':
# return self.evennia_to_msdp(arg, ("COMMANDS", "LISTS", "CONFIGURABLE_VARIABLES",
# "REPORTED_VARIABLES", "SENDABLE_VARIABLES"))
# elif arg == 'CONFIGURABLE_VARIABLES':
# return self.evennia_to_msdp(arg, ("CLIENT_NAME", "CLIENT_VERSION", "PLUGIN_ID"))
# elif arg == 'REPORTABLE_VARIABLES':
# return self.evennia_to_msdp(arg, MSDP_REPORTABLE.keys())
# elif arg == 'REPORTED_VARIABLES':
# # the dynamically set items to report
# return self.evennia_to_msdp(arg, self.msdp_reported.keys())
# elif arg == 'SENDABLE_VARIABLES':
# return self.evennia_to_msdp(arg, MSDP_SENDABLE.keys())
# else:
# return self.evennia_to_msdp("LIST", arg)
# # default msdp commands
# def msdp_cmd_report(self, *arg):
# """
# The report command instructs the server to start reporting a
# reportable variable to the client.
# """
# try:
# return MSDP_REPORTABLE[arg](report=True)
# except Exception:
# logger.log_trace()
# def msdp_cmd_unreport(self, arg):
# """
# Unreport a previously reported variable
# """
# try:
# MSDP_REPORTABLE[arg](report=False)
# except Exception:
# self.logger.log_trace()
# def msdp_cmd_reset(self, arg):
# """
# The reset command resets a variable to its initial state.
# """
# try:
# MSDP_REPORTABLE[arg](reset=True)
# except Exception:
# logger.log_trace()
# def msdp_cmd_send(self, *args):
# """
# Request the server to send a particular variable
# to the client.
# arg - this is a list of variables the client wants.
# """
# ret = []
# for var in make_iter(arg)
# for var in make_iter(arg):
# try:
# ret.append(MSDP_REPORTABLE[var.upper()])# (send=True))
# except Exception:
# ret.append("ERROR")#logger.log_trace()
# return ret
self.protocol.data_in(text=None, oob=(funcname, args, kwargs))

View file

@ -83,7 +83,7 @@ class Portal(object):
# create a store of services
self.services = service.IServiceCollection(application)
self.amp_protocol = None # set by amp factory
self.amp_protocol = None # set by amp factory
self.sessions = PORTAL_SESSIONS
self.sessions.portal = self
@ -99,7 +99,7 @@ class Portal(object):
be restarted or is shutting down. Valid modes are True/False and None.
If mode is None, no change will be done to the flag file.
"""
if mode == None:
if mode is None:
return
f = open(PORTAL_RESTART, 'w')
print "writing mode=%(mode)s to %(portal_restart)s" % {'mode': mode, 'portal_restart': PORTAL_RESTART}
@ -211,17 +211,20 @@ if SSL_ENABLED:
factory = protocol.ServerFactory()
factory.sessionhandler = PORTAL_SESSIONS
factory.protocol = ssl.SSLProtocol
ssl_service = internet.SSLServer(port, factory, ssl.getSSLContext(), interface=interface)
ssl_service = internet.SSLServer(port,
factory,
ssl.getSSLContext(),
interface=interface)
ssl_service.setName('EvenniaSSL%s' % pstring)
PORTAL.services.addService(ssl_service)
print " ssl%s: %s" % (ifacestr, port)
if SSH_ENABLED:
# Start SSH game connections. Will create a keypair in evennia/game if necessary.
# Start SSH game connections. Will create a keypair in
# evennia/game if necessary.
from src.server.portal import ssh
@ -234,9 +237,9 @@ if SSH_ENABLED:
ifacestr = "-%s" % interface
for port in SSH_PORTS:
pstring = "%s:%s" % (ifacestr, port)
factory = ssh.makeFactory({'protocolFactory':ssh.SshProtocol,
'protocolArgs':(),
'sessions':PORTAL_SESSIONS})
factory = ssh.makeFactory({'protocolFactory': ssh.SshProtocol,
'protocolArgs': (),
'sessions': PORTAL_SESSIONS})
ssh_service = internet.TCPServer(port, factory, interface=interface)
ssh_service.setName('EvenniaSSH%s' % pstring)
PORTAL.services.addService(ssh_service)
@ -247,8 +250,6 @@ if WEBSERVER_ENABLED:
# Start a reverse proxy to relay data to the Server-side webserver
from twisted.web import proxy
for interface in WEBSERVER_INTERFACES:
if ":" in interface:
print " iPv6 interfaces not yet supported"
@ -269,7 +270,9 @@ if WEBSERVER_ENABLED:
webclientstr = "/client"
web_root = server.Site(web_root, logPath=settings.HTTP_LOG_FILE)
proxy_service = internet.TCPServer(proxyport, web_root, interface=interface)
proxy_service = internet.TCPServer(proxyport,
web_root,
interface=interface)
proxy_service.setName('EvenniaWebProxy%s' % pstring)
PORTAL.services.addService(proxy_service)
print " webproxy%s%s:%s (<-> %s)" % (webclientstr, ifacestr, proxyport, serverport)
@ -278,7 +281,7 @@ for plugin_module in PORTAL_SERVICES_PLUGIN_MODULES:
# external plugin services to start
plugin_module.start_plugin_services(PORTAL)
print '-' * 50 # end of terminal output
print '-' * 50 # end of terminal output
if os.name == 'nt':
# Windows only: Set PID file manually

View file

@ -4,6 +4,7 @@ Sessionhandler for portal sessions
import time
from src.server.sessionhandler import SessionHandler, PCONN, PDISCONN
#------------------------------------------------------------
# Portal-SessionHandler class
#------------------------------------------------------------
@ -39,8 +40,8 @@ class PortalSessionHandler(SessionHandler):
def connect(self, session):
"""
Called by protocol at first connect. This adds a not-yet authenticated session
using an ever-increasing counter for sessid.
Called by protocol at first connect. This adds a not-yet
authenticated session using an ever-increasing counter for sessid.
"""
self.latest_sessid += 1
sessid = self.latest_sessid
@ -48,13 +49,15 @@ class PortalSessionHandler(SessionHandler):
sessdata = session.get_sync_data()
self.sessions[sessid] = session
# sync with server-side
if self.portal.amp_protocol: # this is a timing issue
if self.portal.amp_protocol: # this is a timing issue
self.portal.amp_protocol.call_remote_ServerAdmin(sessid,
operation=PCONN,
data=sessdata)
def disconnect(self, session):
"""
Called from portal side when the connection is closed from the portal side.
Called from portal side when the connection is closed
from the portal side.
"""
sessid = session.sessid
if sessid in self.sessions:
@ -86,18 +89,22 @@ class PortalSessionHandler(SessionHandler):
self.sessions = {}
def server_logged_in(self, sessid, data):
"The server tells us that the session has been authenticated. Update it."
"""
The server tells us that the session has been
authenticated. Update it.
"""
sess = self.get_session(sessid)
sess.load_sync_data(data)
def server_session_sync(self, serversessions):
"""
Server wants to save data to the portal, maybe because it's about to shut down.
We don't overwrite any sessions here, just update them in-place and remove
any that are out of sync (which should normally not be the case)
Server wants to save data to the portal, maybe because it's about
to shut down. We don't overwrite any sessions here, just update
them in-place and remove any that are out of sync (which should
normally not be the case)
serversessions - dictionary {sessid:{property:value},...} describing the properties
to sync on all sessions
serversessions - dictionary {sessid:{property:value},...} describing
the properties to sync on all sessions
"""
to_save = [sessid for sessid in serversessions if sessid in self.sessions]
to_delete = [sessid for sessid in self.sessions if sessid not in to_save]
@ -131,6 +138,7 @@ class PortalSessionHandler(SessionHandler):
self.portal.amp_protocol.call_remote_MsgPortal2Server(session.sessid,
msg=text,
data=kwargs)
def announce_all(self, message):
"""
Send message to all connection sessions
@ -138,7 +146,6 @@ class PortalSessionHandler(SessionHandler):
for session in self.sessions.values():
session.data_out(message)
def data_out(self, sessid, text=None, **kwargs):
"""
Called by server for having the portal relay messages and data

View file

@ -26,7 +26,7 @@ from twisted.python import components
from django.conf import settings
from src.server import session
from src.players.models import PlayerDB
from src.utils import ansi, utils, logger
from src.utils import ansi, utils
ENCODINGS = settings.ENCODINGS
@ -35,6 +35,7 @@ CTRL_D = '\x04'
CTRL_BACKSLASH = '\x1c'
CTRL_L = '\x0c'
class SshProtocol(Manhole, session.Session):
"""
Each player connecting over ssh gets this protocol assigned to
@ -47,7 +48,8 @@ class SshProtocol(Manhole, session.Session):
login automatically.
"""
self.authenticated_player = starttuple[0]
self.cfactory = starttuple[1] # obs may not be called self.factory, it gets overwritten!
# obs must not be called self.factory, that gets overwritten!
self.cfactory = starttuple[1]
def terminalSize(self, width, height):
"""
@ -95,7 +97,6 @@ class SshProtocol(Manhole, session.Session):
self.terminal.write("KeyboardInterrupt")
self.terminal.nextLine()
def handle_EOF(self):
"""
Handles EOF generally used to exit.
@ -105,7 +106,6 @@ class SshProtocol(Manhole, session.Session):
else:
self.handle_QUIT()
def handle_FF(self):
"""
Handle a 'form feed' byte - generally used to request a screen
@ -114,14 +114,12 @@ class SshProtocol(Manhole, session.Session):
self.terminal.eraseDisplay()
self.terminal.cursorHome()
def handle_QUIT(self):
"""
Quit, end, and lose the connection.
"""
self.terminal.loseConnection()
def connectionLost(self, reason=None):
"""
This is executed when the connection is lost for
@ -140,9 +138,7 @@ class SshProtocol(Manhole, session.Session):
"""
return self.terminal.transport.getPeer()
def lineReceived(self, string):
"""
Communication Player -> Evennia. Any line return indicates a
command for the purpose of the MUD. So we take the user input
@ -159,10 +155,10 @@ class SshProtocol(Manhole, session.Session):
"""
for line in string.split('\n'):
self.terminal.write(line) #this is the telnet-specific method for sending
#this is the telnet-specific method for sending
self.terminal.write(line)
self.terminal.nextLine()
# session-general method hooks
def disconnect(self, reason="Connection closed. Goodbye for now."):
@ -175,7 +171,8 @@ class SshProtocol(Manhole, session.Session):
def data_out(self, text=None, **kwargs):
"""
Data Evennia -> Player access hook. 'data' argument is a dict parsed for string settings.
Data Evennia -> Player access hook. 'data' argument is a dict
parsed for string settings.
ssh flags:
raw=True - leave all ansi markup and tokens unparsed
@ -190,9 +187,9 @@ class SshProtocol(Manhole, session.Session):
raw = kwargs.get("raw", False)
nomarkup = kwargs.get("nomarkup", False)
if raw:
self.lineSend(string)
self.lineSend(text)
else:
self.lineSend(ansi.parse_ansi(string.strip("{r") + "{r", strip_ansi=nomarkup))
self.lineSend(ansi.parse_ansi(text.strip("{r") + "{r", strip_ansi=nomarkup))
class ExtraInfoAuthServer(SSHUserAuthServer):
@ -209,6 +206,7 @@ class ExtraInfoAuthServer(SSHUserAuthServer):
return self.portal.login(c, None, IConchUser).addErrback(
self._ebPassword)
class PlayerDBPasswordChecker(object):
"""
Checks the django db for the correct credentials for
@ -232,6 +230,7 @@ class PlayerDBPasswordChecker(object):
res = (player, self.factory)
return defer.succeed(res)
class PassAvatarIdTerminalRealm(TerminalRealm):
"""
Returns an avatar that passes the avatarId through to the
@ -244,7 +243,7 @@ class PassAvatarIdTerminalRealm(TerminalRealm):
sess = self.sessionFactory(comp)
sess.transportFactory = self.transportFactory
sess.chainedProtocolFactory = lambda : self.chainedProtocolFactory(avatarId)
sess.chainedProtocolFactory = lambda: self.chainedProtocolFactory(avatarId)
comp.setComponent(iconch.IConchUser, user)
comp.setComponent(iconch.ISession, sess)
@ -252,7 +251,6 @@ class PassAvatarIdTerminalRealm(TerminalRealm):
return user
class TerminalSessionTransport_getPeer:
"""
Taken from twisted's TerminalSessionTransport which doesn't
@ -345,4 +343,4 @@ def makeFactory(configdict):
factory.portal.registerChecker(PlayerDBPasswordChecker(factory))
return factory
return factory

View file

@ -3,7 +3,8 @@ This is a simple context factory for auto-creating
SSL keys and certificates.
"""
import os, sys
import os
import sys
from twisted.internet import ssl as twisted_ssl
try:
import OpenSSL
@ -13,6 +14,7 @@ except ImportError:
from src.server.portal.telnet import TelnetProtocol
class SSLProtocol(TelnetProtocol):
"""
Communication is the same as telnet, except data transfer
@ -20,6 +22,7 @@ class SSLProtocol(TelnetProtocol):
"""
pass
def verify_SSL_key_and_cert(keyfile, certfile):
"""
This function looks for RSA key and certificate in the current
@ -41,24 +44,27 @@ def verify_SSL_key_and_cert(keyfile, certfile):
rsaKey = Key(RSA.generate(KEY_LENGTH))
keyString = rsaKey.toString(type="OPENSSH")
file(keyfile, 'w+b').write(keyString)
except Exception,e:
except Exception, e:
print "rsaKey error: %(e)s\n WARNING: Evennia could not auto-generate SSL private key." % {'e': e}
print "If this error persists, create game/%(keyfile)s yourself using third-party tools." % {'keyfile': keyfile}
sys.exit(5)
# try to create the certificate
CERT_EXPIRE = 365 * 20 # twenty years validity
CERT_EXPIRE = 365 * 20 # twenty years validity
# default:
#openssl req -new -x509 -key ssl.key -out ssl.cert -days 7300
exestring = "openssl req -new -x509 -key %s -out %s -days %s" % (keyfile, certfile, CERT_EXPIRE)
#print "exestring:", exestring
try:
err = subprocess.call(exestring)#, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
subprocess.call(exestring)
except OSError, e:
string = "\n".join([
" %s\n" % e,
" Evennia's SSL context factory could not automatically create an SSL certificate game/%(cert)s." % {'cert': certfile},
" A private key 'ssl.key' was already created. Please create %(cert)s manually using the commands valid" % {'cert': certfile},
" Evennia's SSL context factory could not automatically",
" create an SSL certificate game/%(cert)s." % {'cert': certfile},
" A private key 'ssl.key' was already created. Please",
" create %(cert)s manually using the commands valid" % {'cert': certfile},
" for your operating system.",
" Example (linux, using the openssl program): ",
" %s" % exestring])
@ -66,6 +72,7 @@ def verify_SSL_key_and_cert(keyfile, certfile):
sys.exit(5)
print "done."
def getSSLContext():
"""
Returns an SSL context (key and certificate). This function

View file

@ -9,15 +9,14 @@ sessions etc.
import re
from twisted.conch.telnet import Telnet, StatefulTelnetProtocol, IAC, LINEMODE
from twisted.internet.defer import inlineCallbacks, returnValue
from src.server.session import Session
from src.server.portal import ttype, mssp, msdp
from src.server.portal.mccp import Mccp, mccp_compress, MCCP
from src.utils import utils, ansi, logger
from src.utils.utils import make_iter, is_iter
_RE_N = re.compile(r"\{n$")
class TelnetProtocol(Telnet, StatefulTelnetProtocol, Session):
"""
Each player connecting over telnet (ie using most traditional mud
@ -127,7 +126,7 @@ class TelnetProtocol(Telnet, StatefulTelnetProtocol, Session):
def _write(self, data):
"hook overloading the one used in plain telnet"
#print "_write (%s): %s" % (self.state, " ".join(str(ord(c)) for c in data))
# print "_write (%s): %s" % (self.state, " ".join(str(ord(c)) for c in data))
data = data.replace('\n', '\r\n').replace('\r\r\n', '\r\n')
#data = data.replace('\n', '\r\n')
super(TelnetProtocol, self)._write(mccp_compress(self, data))
@ -147,7 +146,6 @@ class TelnetProtocol(Telnet, StatefulTelnetProtocol, Session):
"""
self.data_in(text=string)
# Session hooks
def disconnect(self, reason=None):
@ -172,11 +170,13 @@ class TelnetProtocol(Telnet, StatefulTelnetProtocol, Session):
through the telnet connection.
valid telnet kwargs:
raw=True - pass string through without any ansi processing (i.e. include Evennia
ansi markers but do not convert them into ansi tokens)
raw=True - pass string through without any ansi
processing (i.e. include Evennia ansi markers but do
not convert them into ansi tokens)
nomarkup=True - strip all ansi markup
The telnet ttype negotiation flags, if any, are used if no kwargs are given.
The telnet ttype negotiation flags, if any, are used if no kwargs
are given.
"""
try:
text = utils.to_str(text if text else "", encoding=self.encoding)
@ -200,6 +200,7 @@ class TelnetProtocol(Telnet, StatefulTelnetProtocol, Session):
# no processing whatsoever
self.sendLine(text)
else:
# we need to make sure to kill the color at the end in order to match the webclient output.
#print "telnet data out:", self.protocol_flags, id(self.protocol_flags), id(self)
# we need to make sure to kill the color at the end in order
# to match the webclient output.
# print "telnet data out:", self.protocol_flags, id(self.protocol_flags), id(self)
self.sendLine(ansi.parse_ansi(_RE_N.sub("", text) + "{n", strip_ansi=nomarkup, xterm256=ttype.get('256 COLORS')))

View file

@ -12,12 +12,12 @@ under the 'TTYPE' key.
"""
# telnet option codes
TTYPE = chr(24)
TTYPE = chr(24)
IS = chr(0)
SEND = chr(1)
# terminal capabilities and their codes
MTTS = [(128,'PROXY'),
MTTS = [(128, 'PROXY'),
(64, 'SCREEN READER'),
(32, 'OSC COLOR PALETTE'),
(16, 'MOUSE TRACKING'),
@ -27,8 +27,8 @@ MTTS = [(128,'PROXY'),
(1, 'ANSI')]
# some clients sends erroneous strings instead
# of capability numbers. We try to convert back.
MTTS_invert = {"PROXY":128,
"SCREEN COLOR PALETTE":64,
MTTS_invert = {"PROXY": 128,
"SCREEN COLOR PALETTE": 64,
"OSC COLOR PALETTE": 32,
"MOUSE TRACKING": 16,
"256 COLORS": 8,
@ -36,6 +36,7 @@ MTTS_invert = {"PROXY":128,
"VT100": 2,
"ANSI": 1}
class Ttype(object):
"""
Handles ttype negotiations. Called and initiated by the
@ -51,7 +52,7 @@ class Ttype(object):
"""
self.ttype_step = 0
self.protocol = protocol
self.protocol.protocol_flags['TTYPE'] = {"init_done":False}
self.protocol.protocol_flags['TTYPE'] = {"init_done": False}
# setup protocol to handle ttype initialization and negotiation
self.protocol.negotiationMap[TTYPE] = self.do_ttype
# ask if client will ttype, connect callback if it does.
@ -61,7 +62,7 @@ class Ttype(object):
"""
Callback if ttype is not supported by client.
"""
self.protocol.protocol_flags['TTYPE'] = {"init_done":True}
self.protocol.protocol_flags['TTYPE'] = {"init_done": True}
def do_ttype(self, option):
"""
@ -95,9 +96,9 @@ class Ttype(object):
try:
option = int(option.strip('MTTS '))
except ValueError:
# it seems some clients don't send MTTS according to protocol
# specification, but instead just sends the data as plain
# strings. We try to convert back.
# it seems some clients don't send MTTS according to
# protocol specification, but instead just sends
# the data as plain strings. We try to convert back.
option = MTTS_invert.get(option.strip('MTTS ').upper())
if not option:
# no conversion possible. Give up.

View file

@ -20,19 +20,19 @@ import time
from hashlib import md5
from twisted.web import server, resource
from twisted.internet import defer, reactor
from django.utils import simplejson
from django.utils.functional import Promise
from django.utils.encoding import force_unicode
from django.conf import settings
from src.utils import utils, logger, ansi
from src.utils import utils, logger
from src.utils.text2html import parse_html
from src.server import session
SERVERNAME = settings.SERVERNAME
ENCODINGS = settings.ENCODINGS
# defining a simple json encoder for returning
# django data to the client. Might need to
# extend this if one wants to send more
@ -43,6 +43,8 @@ class LazyEncoder(simplejson.JSONEncoder):
if isinstance(obj, Promise):
return force_unicode(obj)
return super(LazyEncoder, self).default(obj)
def jsonify(obj):
return utils.to_str(simplejson.dumps(obj, ensure_ascii=False, cls=LazyEncoder))
@ -84,23 +86,23 @@ class WebClient(resource.Resource):
request = self.requests.get(suid)
if request:
# we have a request waiting. Return immediately.
request.write(jsonify({'msg':string, 'data':data}))
request.write(jsonify({'msg': string, 'data': data}))
request.finish()
del self.requests[suid]
else:
# no waiting request. Store data in buffer
dataentries = self.databuffer.get(suid, [])
dataentries.append(jsonify({'msg':string, 'data':data}))
dataentries.append(jsonify({'msg': string, 'data': data}))
self.databuffer[suid] = dataentries
def client_disconnect(self, suid):
"""
Disconnect session with given suid.
"""
if self.requests.has_key(suid):
if suid in self.requests:
self.requests[suid].finish()
del self.requests[suid]
if self.databuffer.has_key(suid):
if suid in self.databuffer:
del self.databuffer[suid]
def mode_init(self, request):
@ -108,7 +110,8 @@ class WebClient(resource.Resource):
This is called by render_POST when the client
requests an init mode operation (at startup)
"""
#csess = request.getSession() # obs, this is a cookie, not an evennia session!
#csess = request.getSession() # obs, this is a cookie, not
# an evennia session!
#csees.expireCallbacks.append(lambda : )
suid = request.args.get('suid', ['0'])[0]
@ -124,7 +127,7 @@ class WebClient(resource.Resource):
sess.init_session("webclient", remote_addr, self.sessionhandler)
sess.suid = suid
sess.sessionhandler.connect(sess)
return jsonify({'msg':host_string, 'suid':suid})
return jsonify({'msg': host_string, 'suid': suid})
def mode_input(self, request):
"""
@ -158,8 +161,8 @@ class WebClient(resource.Resource):
if dataentries:
return dataentries.pop(0)
request.notifyFinish().addErrback(self._responseFailed, suid, request)
if self.requests.has_key(suid):
self.requests[suid].finish() # Clear any stale request.
if suid in self.requests:
self.requests[suid].finish() # Clear any stale request.
self.requests[suid] = request
return server.NOT_DONE_YET
@ -206,6 +209,7 @@ class WebClient(resource.Resource):
# this should not happen if client sends valid data.
return ''
#
# A session type handling communication over the
# web client interface.
@ -241,7 +245,8 @@ class WebClientSession(session.Session):
if raw:
self.client.lineSend(self.suid, text)
else:
self.client.lineSend(self.suid, parse_html(text, strip_ansi=nomarkup))
self.client.lineSend(self.suid,
parse_html(text, strip_ansi=nomarkup))
return
except Exception:
logger.log_trace()

View file

@ -32,12 +32,11 @@ from src.server.sessionhandler import SESSIONS
# setting up server-side field cache
from django.db.models.signals import pre_save, post_save
from django.db.models.signals import post_save
from src.server.caches import field_pre_save
#pre_save.connect(field_pre_save, dispatch_uid="fieldcache")
post_save.connect(field_pre_save, dispatch_uid="fieldcache")
from src.typeclasses.models import TypedObject
#from src.server.caches import post_attr_update
#from django.db.models.signals import m2m_changed
@ -104,7 +103,7 @@ class Evennia(object):
# create a store of services
self.services = service.IServiceCollection(application)
self.amp_protocol = None # set by amp factory
self.amp_protocol = None # set by amp factory
self.sessions = SESSIONS
self.sessions.server = self
@ -121,7 +120,8 @@ class Evennia(object):
# set a callback if the server is killed abruptly,
# by Ctrl-C, reboot etc.
reactor.addSystemEventTrigger('before', 'shutdown', self.shutdown, _reactor_stopping=True)
reactor.addSystemEventTrigger('before', 'shutdown',
self.shutdown, _reactor_stopping=True)
self.game_running = True
@ -146,38 +146,48 @@ class Evennia(object):
def update_defaults(self):
"""
We make sure to store the most important object defaults here, so we can catch if they
change and update them on-objects automatically. This allows for changing default cmdset locations
and default typeclasses in the settings file and have them auto-update all already existing
objects.
We make sure to store the most important object defaults here, so
we can catch if they change and update them on-objects automatically.
This allows for changing default cmdset locations and default
typeclasses in the settings file and have them auto-update all
already existing objects.
"""
# setting names
settings_names = ("CMDSET_CHARACTER", "CMDSET_PLAYER", "BASE_PLAYER_TYPECLASS", "BASE_OBJECT_TYPECLASS",
"BASE_CHARACTER_TYPECLASS", "BASE_ROOM_TYPECLASS", "BASE_EXIT_TYPECLASS", "BASE_SCRIPT_TYPECLASS")
settings_names = ("CMDSET_CHARACTER", "CMDSET_PLAYER",
"BASE_PLAYER_TYPECLASS", "BASE_OBJECT_TYPECLASS",
"BASE_CHARACTER_TYPECLASS", "BASE_ROOM_TYPECLASS",
"BASE_EXIT_TYPECLASS", "BASE_SCRIPT_TYPECLASS")
# get previous and current settings so they can be compared
settings_compare = zip([ServerConfig.objects.conf(name) for name in settings_names],
[settings.__getattr__(name) for name in settings_names])
mismatches = [i for i, tup in enumerate(settings_compare) if tup[0] and tup[1] and tup[0] != tup[1]]
if len(mismatches): # can't use any() since mismatches may be [0] which reads as False for any()
# we have a changed default. Import relevant objects and run the update
if len(mismatches): # can't use any() since mismatches may be [0] which reads as False for any()
# we have a changed default. Import relevant objects and
# run the update
from src.objects.models import ObjectDB
#from src.players.models import PlayerDB
for i, prev, curr in ((i, tup[0], tup[1]) for i, tup in enumerate(settings_compare) if i in mismatches):
# update the database
print " %s:\n '%s' changed to '%s'. Updating unchanged entries in database ..." % (settings_names[i], prev, curr)
if i == 0: [obj.__setattr__("cmdset_storage", curr) for obj in ObjectDB.objects.filter(db_cmdset_storage__exact=prev)]
if i == 1: [ply.__setattr__("cmdset_storage", curr) for ply in PlayerDB.objects.filter(db_cmdset_storage__exact=prev)]
if i == 2: [ply.__setattr__("typeclass_path", curr) for ply in PlayerDB.objects.filter(db_typeclass_path__exact=prev)]
if i in (3,4,5,6): [obj.__setattr__("typeclass_path",curr)
for obj in ObjectDB.objects.filter(db_typeclass_path__exact=prev)]
if i == 7: [scr.__setattr__("typeclass_path", curr) for scr in ScriptDB.objects.filter(db_typeclass_path__exact=prev)]
if i == 0:
[obj.__setattr__("cmdset_storage", curr) for obj in ObjectDB.objects.filter(db_cmdset_storage__exact=prev)]
if i == 1:
[ply.__setattr__("cmdset_storage", curr) for ply in PlayerDB.objects.filter(db_cmdset_storage__exact=prev)]
if i == 2:
[ply.__setattr__("typeclass_path", curr) for ply in PlayerDB.objects.filter(db_typeclass_path__exact=prev)]
if i in (3, 4, 5, 6):
[obj.__setattr__("typeclass_path", curr) for obj in ObjectDB.objects.filter(db_typeclass_path__exact=prev)]
if i == 7:
[scr.__setattr__("typeclass_path", curr) for scr in ScriptDB.objects.filter(db_typeclass_path__exact=prev)]
# store the new default and clean caches
ServerConfig.objects.conf(settings_names[i], curr)
ObjectDB.flush_instance_cache()
PlayerDB.flush_instance_cache()
ScriptDB.flush_instance_cache()
# if this is the first start we might not have a "previous" setup saved. Store it now.
[ServerConfig.objects.conf(settings_names[i], tup[1]) for i, tup in enumerate(settings_compare) if not tup[0]]
# if this is the first start we might not have a "previous"
# setup saved. Store it now.
[ServerConfig.objects.conf(settings_names[i], tup[1])
for i, tup in enumerate(settings_compare) if not tup[0]]
def run_initial_setup(self):
"""
@ -191,7 +201,7 @@ class Evennia(object):
# i.e. this is an empty DB that needs populating.
print ' Server started for the first time. Setting defaults.'
initial_setup.handle_setup(0)
print '-'*50
print '-' * 50
elif int(last_initial_setup_step) >= 0:
# a positive value means the setup crashed on one of its
# modules and setup will resume from this step, retrying
@ -200,7 +210,7 @@ class Evennia(object):
print ' Resuming initial setup from step %(last)s.' % \
{'last': last_initial_setup_step}
initial_setup.handle_setup(int(last_initial_setup_step))
print '-'*50
print '-' * 50
def run_init_hooks(self):
"""
@ -244,7 +254,7 @@ class Evennia(object):
Either way, the active restart setting (Restart=True/False) is
returned so the server knows which more it's in.
"""
if mode == None:
if mode is None:
with open(SERVER_RESTART, 'r') as f:
# mode is either shutdown, reset or reload
mode = f.read()
@ -259,16 +269,20 @@ class Evennia(object):
Shuts down the server from inside it.
mode - sets the server restart mode.
'reload' - server restarts, no "persistent" scripts are stopped, at_reload hooks called.
'reset' - server restarts, non-persistent scripts stopped, at_shutdown hooks called.
'reload' - server restarts, no "persistent" scripts
are stopped, at_reload hooks called.
'reset' - server restarts, non-persistent scripts stopped,
at_shutdown hooks called.
'shutdown' - like reset, but server will not auto-restart.
None - keep currently set flag from flag file.
_reactor_stopping - this is set if server is stopped by a kill command OR this method was already called
once - in both cases the reactor is dead/stopping already.
_reactor_stopping - this is set if server is stopped by a kill
command OR this method was already called
once - in both cases the reactor is
dead/stopping already.
"""
if _reactor_stopping and hasattr(self, "shutdown_complete"):
# this means we have already passed through this method once; we don't need
# to run the shutdown procedure again.
# this means we have already passed through this method
# once; we don't need to run the shutdown procedure again.
defer.returnValue(None)
mode = self.set_restart_mode(mode)
@ -280,9 +294,12 @@ class Evennia(object):
if mode == 'reload':
# call restart hooks
yield [(o.typeclass, o.at_server_reload()) for o in ObjectDB.get_all_cached_instances()]
yield [(p.typeclass, p.at_server_reload()) for p in PlayerDB.get_all_cached_instances()]
yield [(s.typeclass, s.pause(), s.at_server_reload()) for s in ScriptDB.get_all_cached_instances()]
yield [(o.typeclass, o.at_server_reload())
for o in ObjectDB.get_all_cached_instances()]
yield [(p.typeclass, p.at_server_reload())
for p in PlayerDB.get_all_cached_instances()]
yield [(s.typeclass, s.pause(), s.at_server_reload())
for s in ScriptDB.get_all_cached_instances()]
yield self.sessions.all_sessions_portal_sync()
ServerConfig.objects.conf("server_restart_mode", "reload")
@ -294,14 +311,20 @@ class Evennia(object):
else:
if mode == 'reset':
# don't unset the is_connected flag on reset, otherwise same as shutdown
yield [(o.typeclass, o.at_server_shutdown()) for o in ObjectDB.get_all_cached_instances()]
else: # shutdown
yield [_SA(p, "is_connected", False) for p in PlayerDB.get_all_cached_instances()]
yield [(o.typeclass, o.at_server_shutdown()) for o in ObjectDB.get_all_cached_instances()]
# don't unset the is_connected flag on reset, otherwise
# same as shutdown
yield [(o.typeclass, o.at_server_shutdown())
for o in ObjectDB.get_all_cached_instances()]
else: # shutdown
yield [_SA(p, "is_connected", False)
for p in PlayerDB.get_all_cached_instances()]
yield [(o.typeclass, o.at_server_shutdown())
for o in ObjectDB.get_all_cached_instances()]
yield [(p.typeclass, p.unpuppet_all(), p.at_server_shutdown()) for p in PlayerDB.get_all_cached_instances()]
yield [(s.typeclass, s.at_server_shutdown()) for s in ScriptDB.get_all_cached_instances()]
yield [(p.typeclass, p.unpuppet_all(), p.at_server_shutdown())
for p in PlayerDB.get_all_cached_instances()]
yield [(s.typeclass, s.at_server_shutdown())
for s in ScriptDB.get_all_cached_instances()]
yield ObjectDB.objects.clear_all_sessids()
ServerConfig.objects.conf("server_restart_mode", "reset")
@ -310,12 +333,14 @@ class Evennia(object):
if SERVER_STARTSTOP_MODULE:
SERVER_STARTSTOP_MODULE.at_server_stop()
# if _reactor_stopping is true, reactor does not need to be stopped again.
# if _reactor_stopping is true, reactor does not need to
# be stopped again.
if os.name == 'nt' and os.path.exists(SERVER_PIDFILE):
# for Windows we need to remove pid files manually
os.remove(SERVER_PIDFILE)
if not _reactor_stopping:
# this will also send a reactor.stop signal, so we set a flag to avoid loops.
# this will also send a reactor.stop signal, so we set a
# flag to avoid loops.
self.shutdown_complete = True
reactor.callLater(0, reactor.stop)
@ -415,7 +440,7 @@ for plugin_module in SERVER_SERVICES_PLUGIN_MODULES:
# external plugin protocols
plugin_module.start_plugin_services(EVENNIA)
print '-' * 50 # end of terminal output
print '-' * 50 # end of terminal output
# clear server startup mode
ServerConfig.objects.conf("server_starting_mode", delete=True)

View file

@ -10,7 +10,7 @@ are stored on the Portal side)
import time
from datetime import datetime
from django.conf import settings
from src.scripts.models import ScriptDB
#from src.scripts.models import ScriptDB
from src.comms.models import ChannelDB
from src.utils import logger, utils
from src.utils.utils import make_iter, to_str
@ -53,7 +53,8 @@ class ServerSession(Session):
self.cmdset = cmdsethandler.CmdSetHandler(self)
def __cmdset_storage_get(self):
return [path.strip() for path in self.cmdset_storage_string.split(',')]
return [path.strip() for path in self.cmdset_storage_string.split(',')]
def __cmdset_storage_set(self, value):
self.cmdset_storage_string = ",".join(str(val).strip() for val in make_iter(value))
cmdset_storage = property(__cmdset_storage_get, __cmdset_storage_set)
@ -61,8 +62,8 @@ class ServerSession(Session):
def at_sync(self):
"""
This is called whenever a session has been resynced with the portal.
At this point all relevant attributes have already been set and self.player
been assigned (if applicable).
At this point all relevant attributes have already been set and
self.player been assigned (if applicable).
Since this is often called after a server restart we need to set up
the session as it was.
@ -78,7 +79,8 @@ class ServerSession(Session):
self.cmdset.update(init_mode=True)
if self.puid:
# reconnect puppet (puid is only set if we are coming back from a server reload)
# reconnect puppet (puid is only set if we are coming
# back from a server reload)
obj = _ObjectDB.objects.get(id=self.puid)
self.player.puppet_object(self.sessid, obj, normal_mode=False)
@ -139,7 +141,8 @@ class ServerSession(Session):
def get_puppet_or_player(self):
"""
Returns session if not logged in; puppet if one exists, otherwise return the player.
Returns session if not logged in; puppet if one exists,
otherwise return the player.
"""
if self.logged_in:
return self.puppet if self.puppet else self.player
@ -192,7 +195,8 @@ class ServerSession(Session):
# merge, give prio to the lowest level (puppet)
nicks = list(puppet.db_attributes.filter(db_category__in=("nick_inputline", "nick_channel"))) + list(nicks)
raw_list = text.split(None)
raw_list = [" ".join(raw_list[:i+1]) for i in range(len(raw_list)) if raw_list[:i+1]]
raw_list = [" ".join(raw_list[:i + 1])
for i in range(len(raw_list)) if raw_list[:i + 1]]
for nick in nicks:
if nick.db_key in raw_list:
text = text.replace(nick.db_key, nick.db_strvalue, 1)
@ -209,7 +213,7 @@ class ServerSession(Session):
if funcname:
_OOB_HANDLER.execute_cmd(self, funcname, *args, **kwargs)
execute_cmd = data_in # alias
execute_cmd = data_in # alias
def data_out(self, text=None, **kwargs):
"""
@ -255,7 +259,6 @@ class ServerSession(Session):
"alias for at_data_out"
self.data_out(text=text, **kwargs)
# Dummy API hooks for use during non-loggedin operation
def at_cmdset_get(self):
@ -282,6 +285,7 @@ class ServerSession(Session):
def all(self):
return [val for val in self.__dict__.keys()
if not val.startswith['_']]
def __getattribute__(self, key):
# return None if no matching attribute was found.
try:
@ -290,12 +294,14 @@ class ServerSession(Session):
return None
self._ndb_holder = NdbHolder()
return self._ndb_holder
#@ndb.setter
def ndb_set(self, value):
"Stop accidentally replacing the db object"
string = "Cannot assign directly to ndb object! "
string = "Use ndb.attr=value instead."
raise Exception(string)
#@ndb.deleter
def ndb_del(self):
"Stop accidental deletion."

View file

@ -6,6 +6,7 @@ on Portal and Server side) should inherit from this class.
import time
#------------------------------------------------------------
# Server Session
#------------------------------------------------------------
@ -18,23 +19,24 @@ class Session(object):
Each connection will see two session instances created:
1) A Portal session. This is customized for the respective connection
protocols that Evennia supports, like Telnet, SSH etc. The Portal session
must call init_session() as part of its initialization. The respective
hook methods should be connected to the methods unique for the respective
protocol so that there is a unified interface to Evennia.
2) A Server session. This is the same for all connected players, regardless
of how they connect.
protocols that Evennia supports, like Telnet, SSH etc. The Portal
session must call init_session() as part of its initialization. The
respective hook methods should be connected to the methods unique
for the respective protocol so that there is a unified interface
to Evennia.
2) A Server session. This is the same for all connected players,
regardless of how they connect.
The Portal and Server have their own respective sessionhandlers. These are synced
whenever new connections happen or the Server restarts etc, which means much of the
same information must be stored in both places e.g. the portal can re-sync with the
server when the server reboots.
The Portal and Server have their own respective sessionhandlers. These
are synced whenever new connections happen or the Server restarts etc,
which means much of the same information must be stored in both places
e.g. the portal can re-sync with the server when the server reboots.
"""
# names of attributes that should be affected by syncing.
_attrs_to_sync = ('protocol_key', 'address', 'suid', 'sessid', 'uid', 'uname',
'logged_in', 'puid', 'encoding',
_attrs_to_sync = ('protocol_key', 'address', 'suid', 'sessid', 'uid',
'uname', 'logged_in', 'puid', 'encoding',
'conn_time', 'cmd_last', 'cmd_last_visible', 'cmd_total',
'protocol_flags', 'server_data', "cmdset_storage_string")
@ -55,7 +57,7 @@ class Session(object):
self.suid = None
# unique id for this session
self.sessid = 0 # no sessid yet
self.sessid = 0 # no sessid yet
# database id for the user connected to this session
self.uid = None
# user name, for easier tracking of sessions
@ -84,7 +86,8 @@ class Session(object):
"""
Return all data relevant to sync the session
"""
return dict((key, value) for key, value in self.__dict__.items() if key in self._attrs_to_sync)
return dict((key, value) for key, value in self.__dict__.items()
if key in self._attrs_to_sync)
def load_sync_data(self, sessdata):
"""
@ -124,4 +127,3 @@ class Session(object):
hook for protocols to send incoming data to the engine.
"""
pass

View file

@ -15,7 +15,7 @@ There are two similar but separate stores of sessions:
import time
from django.conf import settings
from src.commands.cmdhandler import CMD_LOGINSTART
from src.utils.utils import variable_from_module, to_str
from src.utils.utils import variable_from_module
try:
import cPickle as pickle
except ImportError:
@ -29,14 +29,14 @@ _ScriptDB = None
# AMP signals
PCONN = chr(1) # portal session connect
PDISCONN = chr(2) # portal session disconnect
PSYNC = chr(3) # portal session sync
SLOGIN = chr(4) # server session login
SDISCONN = chr(5) # server session disconnect
SDISCONNALL = chr(6) # server session disconnect all
SSHUTD = chr(7) # server shutdown
SSYNC = chr(8) # server session sync
PCONN = chr(1) # portal session connect
PDISCONN = chr(2) # portal session disconnect
PSYNC = chr(3) # portal session sync
SLOGIN = chr(4) # server session login
SDISCONN = chr(5) # server session disconnect
SDISCONNALL = chr(6) # server session disconnect all
SSHUTD = chr(7) # server shutdown
SSYNC = chr(8) # server session sync
# i18n
from django.utils.translation import ugettext as _
@ -45,6 +45,7 @@ SERVERNAME = settings.SERVERNAME
MULTISESSION_MODE = settings.MULTISESSION_MODE
IDLE_TIMEOUT = settings.IDLE_TIMEOUT
def delayed_import():
"Helper method for delayed import of all needed entities"
global _ServerSession, _PlayerDB, _ServerConfig, _ScriptDB
@ -61,6 +62,7 @@ def delayed_import():
# including once to avoid warnings in Python syntax checkers
_ServerSession, _PlayerDB, _ServerConfig, _ScriptDB
#-----------------------------------------------------------
# SessionHandler base class
#------------------------------------------------------------
@ -110,7 +112,8 @@ class SessionHandler(object):
(cmdname, (args,), {kwargs})
((cmdname, (arg1,arg2)), cmdname, (cmdname, (arg1,)))
outputs an ordered structure on the form
((cmdname, (args,), {kwargs}), ...), where the two last parts of each tuple may be empty
((cmdname, (args,), {kwargs}), ...), where the two last
parts of each tuple may be empty
"""
def _parse(oobstruct):
slen = len(oobstruct)
@ -119,8 +122,9 @@ class SessionHandler(object):
elif not hasattr(oobstruct, "__iter__"):
# a singular command name, without arguments or kwargs
return (oobstruct.lower(), (), {})
# regardless of number of args/kwargs, the first element must be the function name.
# we will not catch this error if not, but allow it to propagate.
# regardless of number of args/kwargs, the first element must be
# the function name. We will not catch this error if not, but
# allow it to propagate.
if slen == 1:
return (oobstruct[0].lower(), (), {})
elif slen == 2:
@ -135,7 +139,9 @@ class SessionHandler(object):
return (oobstruct[0].lower(), tuple(oobstruct[1]), dict(oobstruct[2]))
if hasattr(oobstruct, "__iter__"):
# differentiate between (cmdname, cmdname), (cmdname, args, kwargs) and ((cmdname,args,kwargs), (cmdname,args,kwargs), ...)
# differentiate between (cmdname, cmdname),
# (cmdname, args, kwargs) and ((cmdname,args,kwargs),
# (cmdname,args,kwargs), ...)
if oobstruct and isinstance(oobstruct[0], basestring):
return (tuple(_parse(oobstruct)),)
@ -146,6 +152,7 @@ class SessionHandler(object):
return (tuple(out),)
return (_parse(oobstruct),)
#------------------------------------------------------------
# Server-SessionHandler class
#------------------------------------------------------------
@ -171,7 +178,7 @@ class ServerSessionHandler(SessionHandler):
"""
self.sessions = {}
self.server = None
self.server_data = {"servername":SERVERNAME}
self.server_data = {"servername": SERVERNAME}
def portal_connect(self, portalsession):
"""
@ -189,7 +196,8 @@ class ServerSessionHandler(SessionHandler):
sess.sessionhandler = self
sess.load_sync_data(portalsession)
if sess.logged_in and sess.uid:
# this can happen in the case of auto-authenticating protocols like SSH
# this can happen in the case of auto-authenticating
# protocols like SSH
sess.player = _PlayerDB.objects.get_player_from_uid(sess.uid)
sess.at_sync()
# validate all script
@ -216,17 +224,19 @@ class ServerSessionHandler(SessionHandler):
def portal_session_sync(self, portalsessions):
"""
Syncing all session ids of the portal with the ones of the server. This is instantiated
by the portal when reconnecting.
Syncing all session ids of the portal with the ones of the
server. This is instantiated by the portal when reconnecting.
portalsessions is a dictionary {sessid: {property:value},...} defining
each session and the properties in it which should be synced.
each session and the properties in it which should
be synced.
"""
delayed_import()
global _ServerSession, _PlayerDB, _ServerConfig, _ScriptDB
for sess in self.sessions.values():
# we delete the old session to make sure to catch eventual lingering references.
# we delete the old session to make sure to catch eventual
# lingering references.
del sess
for sessid, sessdict in portalsessions.items():
@ -238,7 +248,8 @@ class ServerSessionHandler(SessionHandler):
self.sessions[sessid] = sess
sess.at_sync()
# after sync is complete we force-validate all scripts (this also starts them)
# after sync is complete we force-validate all scripts
# (this also starts them)
init_mode = _ServerConfig.objects.conf("server_restart_mode", default=None)
_ScriptDB.objects.validate(init_mode=init_mode)
_ServerConfig.objects.conf("server_restart_mode", delete=True)
@ -333,7 +344,6 @@ class ServerSessionHandler(SessionHandler):
operation=SSYNC,
data=sessdata)
def disconnect_all_sessions(self, reason=_("You have been disconnected.")):
"""
Cleanly disconnect all of the connected sessions.
@ -346,7 +356,8 @@ class ServerSessionHandler(SessionHandler):
operation=SDISCONNALL,
data=reason)
def disconnect_duplicate_sessions(self, curr_session, reason = _("Logged in from elsewhere. Disconnecting.") ):
def disconnect_duplicate_sessions(self, curr_session,
reason=_("Logged in from elsewhere. Disconnecting.")):
"""
Disconnects any existing sessions with the same user.
"""
@ -364,7 +375,7 @@ class ServerSessionHandler(SessionHandler):
and see if any are dead.
"""
tcurr = time.time()
reason= _("Idle timeout exceeded, disconnecting.")
reason = _("Idle timeout exceeded, disconnecting.")
for session in (session for session in self.sessions.values()
if session.logged_in and IDLE_TIMEOUT > 0
and (tcurr - session.cmd_last) > IDLE_TIMEOUT):
@ -407,7 +418,6 @@ class ServerSessionHandler(SessionHandler):
return self.sessions.get(sessid)
return None
def announce_all(self, message):
"""
Send message to all connected sessions
@ -422,6 +432,7 @@ class ServerSessionHandler(SessionHandler):
self.server.amp_protocol.call_remote_MsgServer2Portal(sessid=session.sessid,
msg=text,
data=kwargs)
def data_in(self, sessid, text="", **kwargs):
"""
Data Portal -> Server

View file

@ -15,7 +15,7 @@ import urlparse
from urllib import quote as urlquote
from twisted.web import resource, http
from twisted.internet import reactor
from twisted.application import service, internet
from twisted.application import internet
from twisted.web.proxy import ReverseProxyResource
from twisted.web.server import NOT_DONE_YET
@ -24,6 +24,7 @@ from django.core.handlers.wsgi import WSGIHandler
from settings import UPSTREAM_IPS
#
# X-Forwarded-For Handler
#
@ -40,13 +41,14 @@ class HTTPChannelWithXForwardedFor(http.HTTPChannel):
proxy_chain = req.getHeader('X-FORWARDED-FOR')
if proxy_chain and client_ip in UPSTREAM_IPS:
forwarded = proxy_chain.split(', ', 1)[CLIENT]
self.transport.client = (forwarded, port)
self.transport.client = (forwarded, port)
# Monkey-patch Twisted to handle X-Forwarded-For.
http.HTTPFactory.protocol = HTTPChannelWithXForwardedFor
class EvenniaReverseProxyResource(ReverseProxyResource):
def getChild(self, path, request):
"""
@ -58,7 +60,6 @@ class EvenniaReverseProxyResource(ReverseProxyResource):
self.host, self.port, self.path + '/' + urlquote(path, safe=""),
self.reactor)
def render(self, request):
"""
Render a request by forwarding it to the proxied server.
@ -77,6 +78,7 @@ class EvenniaReverseProxyResource(ReverseProxyResource):
self.reactor.connectTCP(self.host, self.port, clientFactory)
return NOT_DONE_YET
#
# Website server resource
#
@ -92,7 +94,7 @@ class DjangoWebRoot(resource.Resource):
Setup the django+twisted resource
"""
resource.Resource.__init__(self)
self.wsgi_resource = WSGIResource(reactor, pool , WSGIHandler())
self.wsgi_resource = WSGIResource(reactor, pool, WSGIHandler())
def getChild(self, path, request):
"""
@ -102,6 +104,8 @@ class DjangoWebRoot(resource.Resource):
path0 = request.prepath.pop(0)
request.postpath.insert(0, path0)
return self.wsgi_resource
#
# Threaded Webserver
#
@ -114,14 +118,16 @@ class WSGIWebServer(internet.TCPServer):
call with WSGIWebServer(threadpool, port, wsgi_resource)
"""
def __init__(self, pool, *args, **kwargs ):
def __init__(self, pool, *args, **kwargs):
"This just stores the threadpool"
self.pool = pool
internet.TCPServer.__init__(self, *args, **kwargs)
def startService(self):
"Start the pool after the service"
internet.TCPServer.startService(self)
self.pool.start()
def stopService(self):
"Safely stop the pool after service stop."
internet.TCPServer.stopService(self)