From bc2777031d6198e9603f1c2853bf5c53fe003ec1 Mon Sep 17 00:00:00 2001
From: Griatch <griatch@gmail.com>
Date: Sat, 14 Sep 2019 20:18:07 +0200
Subject: [PATCH] More fixes to text2ansi regexes. Resolve #1920. Resolve #1924.

---
 evennia/objects/objects.py |  4 ++--
 evennia/utils/text2html.py | 21 +++++++++++++++++----
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/evennia/objects/objects.py b/evennia/objects/objects.py
index ad3a0be68e..72bd9ceb36 100644
--- a/evennia/objects/objects.py
+++ b/evennia/objects/objects.py
@@ -1946,6 +1946,7 @@ class DefaultCharacter(DefaultObject):
         Kwargs:
             description (str): Brief description for this object.
             ip (str): IP address of creator (for object auditing).
+            All other kwargs will be passed into the create_object call.
 
         Returns:
             character (Object): A newly created Character of the given typeclass.
@@ -1953,8 +1954,7 @@ class DefaultCharacter(DefaultObject):
 
         """
         errors = []
-        obj = None
-
+        obj = None
         # Get IP address of creator, if available
         ip = kwargs.pop('ip', '')
 
diff --git a/evennia/utils/text2html.py b/evennia/utils/text2html.py
index af12a39e98..4933edd9ab 100644
--- a/evennia/utils/text2html.py
+++ b/evennia/utils/text2html.py
@@ -99,8 +99,9 @@ class TextToHTMLparser(object):
     re_uline = re.compile("(?:%s)(.*?)(?=%s|%s)" % (underline.replace("[", r"\["), fgstop, bgstop))
     re_blink = re.compile("(?:%s)(.*?)(?=%s|%s)" % (blink.replace("[", r"\["), fgstop, bgstop))
     re_inverse = re.compile("(?:%s)(.*?)(?=%s|%s)" % (inverse.replace("[", r"\["), fgstop, bgstop))
-    re_string = re.compile(r'(?P<htmlchars>[<&>])|(?P<firstspace>(?<=\S) {2,})|(?P<space> [ \t]+)|'
+    re_string = re.compile(r'(?P<htmlchars>[<&>])|(?P<firstspace>(?<=\S) )|(?P<space> [ \t]+)|'
                            r'(?P<spacestart>^ )|(?P<lineend>\r\n|\r|\n)', re.S | re.M | re.I)
+    re_dblspace = re.compile(r' {2,}', re.M)
 
     re_url = re.compile(r'((?:ftp|www|https?)\W+(?:(?!\.(?:\s|$)|&\w+;)[^"\',;$*^\\(){}<>\[\]\s])+)(\.(?:\s|$)|&\w+;|)')
     re_mxplink = re.compile(r'\|lc(.*?)\|lt(.*?)\|le', re.DOTALL)
@@ -259,6 +260,13 @@ class TextToHTMLparser(object):
         # change pages (and losing our webclient session).
         return self.re_url.sub(r'<a href="\1" target="_blank">\1</a>\2', text)
 
+    def re_double_space(self, text):
+        """
+        HTML will swallow any normal space after the first, so if any slipped
+        through we must make sure to replace them with " &nbsp;"
+        """
+        return self.re_dblspace.sub(self.sub_dblspace, text)
+
     def sub_mxp_links(self, match):
         """
         Helper method to be passed to re.sub,
@@ -296,15 +304,19 @@ class TextToHTMLparser(object):
         elif cdict['lineend']:
             return '<br>'
         elif cdict['firstspace']:
             return ' &nbsp;'
-        elif cdict['space'] == '\t':
-            return ' ' * self.tabstop
-        elif cdict['space'] or cdict["spacestart"] or cdict['firstspace']:
+            text = match.group()
+            return ' ' if tabstop == 1 else ' ' + "&nbsp;" * tabstop
+        elif cdict['space'] or cdict["spacestart"]:
             text = match.group().replace('\t', '&nbsp;' * self.tabstop)
             text = text.replace(' ', '&nbsp;')
             return text
         return None
 
+    def sub_dblspace(self, match):
+        "clean up double-spaces"
+        return ' ' + '&nbsp;' * (len(match.group()) - 1)
+
     def parse(self, text, strip_ansi=False):
         """
         Main access function, converts a text containing ANSI codes
@@ -331,6 +343,7 @@ class TextToHTMLparser(object):
         result = self.convert_linebreaks(result)
         result = self.remove_backspaces(result)
         result = self.convert_urls(result)
+        result = self.re_double_space(result)
         # clean out eventual ansi that was missed
         # result = parse_ansi(result, strip_ansi=True)
 