widelands-dev team mailing list archive - Message #15294
Re: [Merge] lp:~widelands-dev/widelands-website/update_beautifulsoup4 into lp:widelands-website
Since smilies aren't being used much, I'd be OK with changing the text code for the devil smiley.
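For context on why the preescaping existed at all: the devil smiley ">:-)" starts with ">", which Markdown reads as a blockquote marker at the start of a line. A minimal standalone illustration (plain python-markdown, nothing website-specific):

    from markdown import markdown

    print(markdown('>:-) that was evil'))
    # Without preescaping the leading '>' swallows the whole line:
    # <blockquote>
    # <p>:-) that was evil</p>
    # </blockquote>

So without the preescaping the devil presumably needs a new text code, or it will only render when it is not at the start of a line.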
Diff comments:
> === modified file 'mainpage/templatetags/wl_markdown.py'
> --- mainpage/templatetags/wl_markdown.py 2017-11-14 16:54:28 +0000
> +++ mainpage/templatetags/wl_markdown.py 2018-11-09 18:39:50 +0000
> @@ -105,67 +117,91 @@
> external = False
> break
> if external:
> - return {'class': 'externalLink', 'title': 'This link refers to outer space'}
> + tag['class'] = "externalLink"
> + tag['title'] = "This link refers to outer space"
> + return
>
> if '/profile/' in (tag['href']):
> - return {'class': 'userLink', 'title': 'This link refers to a userpage'}
> + tag['class'] = "userLink"
> + tag['title'] = "This link refers to a userpage"
> + return
>
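The switch from returning attribute dicts to mutating the tag in place looks good to me. For reference, a standalone snippet of how the assignment behaves in bs4 (lxml parser assumed):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<a href="http://example.com">ext</a>', 'lxml')
    tag = soup.find('a')
    tag['class'] = 'externalLink'   # bs4 accepts a plain string here
    tag['title'] = 'This link refers to outer space'
    print(tag)
    # <a class="externalLink" href="http://example.com" title="...">ext</a>
    # (attribute order may differ between bs4 versions)

Since the caller keeps working on the same soup, there is nothing left to merge back, which is why the bare return is enough.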
> if check_for_missing_wikipages and href.startswith('/wiki/'):
>
> # Check for missing wikilink /wiki/PageName[/additionl/stuff]
> # Using href because we need cAsEs here
> - pn = urllib.unquote(tag['href'][6:].split('/', 1)[0])
> + article_name = urllib.unquote(tag['href'][6:].split('/', 1)[0])
>
> - if not len(pn): # Wiki root link is not a page
> - return {'class': 'wrongLink', 'title': 'This Link misses an articlename'}
> + if not len(article_name): # Wiki root link is not a page
> + tag['class'] = "wrongLink"
> + tag['title'] = "This Link misses an articlename"
> + return
>
> # Wiki special pages are also not counted
> - if pn in ['list', 'search', 'history', 'feeds', 'observe', 'edit']:
> - return {'class': 'specialLink'}
> + if article_name in ['list', 'search', 'history', 'feeds', 'observe', 'edit']:
> + tag['class'] = "specialLink"
> + return
>
> # Check for a redirect
> try:
> # try to get the article id; if this fails an IndexError is raised
> a_id = ChangeSet.objects.filter(
> - old_title=pn).values_list('article_id')[0]
> + old_title=article_name).values_list('article_id')[0]
>
> # get actual title of article
> act_t = Article.objects.get(id=a_id[0]).title
> - if pn != act_t:
> - return {'title': "This is a redirect and points to \"" + act_t + "\""}
> + if article_name != act_t:
> + tag['title'] = "This is a redirect and points to \"" + act_t + "\""
> + return
> else:
> - return None
> + return
> except IndexError:
> pass
>
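Not a blocker, but the IndexError dance could be avoided with .first(); a possible variant, untested and reusing the names from the diff:

    a_id = ChangeSet.objects.filter(
        old_title=article_name).values_list('article_id', flat=True).first()
    if a_id is not None:
        act_t = Article.objects.get(id=a_id).title
        if article_name != act_t:
            tag['title'] = 'This is a redirect and points to "' + act_t + '"'
        return

values_list(..., flat=True).first() returns None instead of raising when there is no matching ChangeSet, so the fallthrough to the missing-article check stays the same.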
> # article missing (or misspelled)
> - if Article.objects.filter(title=pn).count() == 0:
> - return {'class': 'missingLink', 'title': 'This Link is misspelled or missing. Click to create it anyway.'}
> -
> - return None
> -
> -
> -def _clickable_image(tag):
> + if Article.objects.filter(title=article_name).count() == 0:
> + tag['class'] = "missingLink"
> + tag['title'] = "This Link is misspelled or missing. Click to create it anyway."
> + return
> + return
> +
> +
> +def _make_clickable_images(tag):
> # is external link?
> if tag['src'].startswith('http'):
> - # is allways a link?
> + # Do not change if it is allready a link
allready -> already
> if tag.parent.name != 'a':
> # add link to image
> - text = '<a href=' + tag['src'] + \
> - '><img src=' + tag['src'] + '></a>'
> - return text
> - return None
> -
> + new_link = BeautifulSoup(features="lxml").new_tag('a')
> + new_link['href'] = tag['src']
> + new_img = BeautifulSoup(features="lxml").new_tag('img')
> + new_img['src'] = tag['src']
> + new_img['alt'] = tag['alt']
> + new_link.append(new_img)
> + tag.replace_with(new_link)
> + return
> +
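To double-check the replace_with() approach, a small standalone sketch of what it produces (lxml parser assumed):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p><img alt="x" src="http://example.com/x.png"/></p>', 'lxml')
    img = soup.find('img')
    link = soup.new_tag('a', href=img['src'])
    new_img = soup.new_tag('img', src=img['src'], alt=img['alt'])
    link.append(new_img)
    img.replace_with(link)
    print(soup.p)
    # <p><a href="http://example.com/x.png"><img alt="x" src="http://example.com/x.png"/></a></p>

As an aside, new_tag() can also be called on the soup that is already being traversed, which would avoid creating a throwaway BeautifulSoup(features="lxml") object for every new tag.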
> +
> +def find_smiley_Strings(bs4_string):
> + """Find strings that contain a smiley symbol"""
> +
> + if bs4_string.parent.name.lower() == 'code':
> + return False
> +
> + #for element in bs4_string.parent.contents:
> + for sc in SMILEYS:
> + if sc[0] in bs4_string:
> + return True
> + return False
>
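For what it's worth, a quick standalone check of how such a string filter plugs into find_all(); the SMILEYS structure below is my assumption (a list of (text code, image) pairs), and the function is a compact stand-in for the one above:

    from bs4 import BeautifulSoup

    SMILEYS = [(':-)', 'face-smile.png')]

    def find_smiley_strings(bs4_string):
        # Match text nodes containing a smiley code, except inside <code>.
        if bs4_string.parent.name.lower() == 'code':
            return False
        return any(code in bs4_string for code, _ in SMILEYS)

    soup = BeautifulSoup('<p>hi :-)</p><code>:-) stays</code>', 'lxml')
    print(soup.find_all(string=find_smiley_strings))
    # ['hi :-)']  -- the occurrence inside <code> is not matched

Each matched NavigableString can then be replaced with a fragment where the code is swapped for the smiley <img>.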
> # Predefine the markdown extensions here to have a clean code in
> # do_wl_markdown()
> md_extensions = ['extra', 'toc', SemanticWikiLinkExtension()]
>
> def do_wl_markdown(value, *args, **keyw):
> - # Do Preescaping for markdown, so that some things stay intact
> - # This is currently only needed for this smiley ">:-)"
> - value = _insert_smiley_preescaping(value)
> - custom = keyw.pop('custom', True)
> + """Apply wl specific things, like smileys or colored links."""
> +
> + beautify = keyw.pop('beautify', True)
> html = smart_str(markdown(value, extensions=md_extensions))
>
> # Sanitize posts from potencial untrusted users (Forum/Wiki/Maps)
>
> === modified file 'pybb/util.py'
> --- pybb/util.py 2018-10-01 16:41:29 +0000
> +++ pybb/util.py 2018-11-09 18:39:50 +0000
> @@ -145,6 +145,16 @@
> return form
>
>
> +PLAIN_LINK_RE = re.compile(r'(http[s]?:\/\/[-a-zA-Z0-9@:%._\+~#=/?]+)')
Do we also want to support ftp(s) here?
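If ftp(s) should be supported as well, the scheme part could simply be widened; possible variant (untested):

    import re

    PLAIN_LINK_RE = re.compile(r'((?:http|ftp)s?:\/\/[-a-zA-Z0-9@:%._\+~#=/?]+)')

    print(bool(PLAIN_LINK_RE.search('see ftps://example.org/file')))  # True
    print(bool(PLAIN_LINK_RE.search('plain text, no link')))          # False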
> +def exclude_code_tag(bs4_string):
> + if bs4_string.parent.name == 'code':
> + return False
> + m = PLAIN_LINK_RE.search(bs4_string)
> + if m:
> + return True
> + return False
> +
> +
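One corner case with the parent check: it only looks at the direct parent, so a plain link inside e.g. <code><em>http://...</em></code> would still be matched. If that matters, walking all ancestors is safer; possible tweak (untested):

    def exclude_code_tag(bs4_string):
        # Skip strings nested anywhere inside a <code> block, not only
        # when <code> is the immediate parent.
        if any(parent.name == 'code' for parent in bs4_string.parents):
            return False
        return bool(PLAIN_LINK_RE.search(bs4_string))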
> def urlize(data):
> """Urlize plain text links in the HTML contents.
>
--
https://code.launchpad.net/~widelands-dev/widelands-website/update_beautifulsoup4/+merge/358571
Your team Widelands Developers is requested to review the proposed merge of lp:~widelands-dev/widelands-website/update_beautifulsoup4 into lp:widelands-website.