launchpad-reviewers team mailing list archive
Message #17182
Re: [Merge] lp:~cjohnston/launchpad/short-ic-emails into lp:launchpad
Diff comments:
> === modified file 'lib/lp/code/mail/codereviewcomment.py'
> --- lib/lp/code/mail/codereviewcomment.py 2014-05-21 08:52:53 +0000
> +++ lib/lp/code/mail/codereviewcomment.py 2014-07-16 22:44:34 +0000
> @@ -1,4 +1,4 @@
> -# Copyright 2009 Canonical Ltd. This software is licensed under the
> +# Copyright 2009-2014 Canonical Ltd. This software is licensed under the
> # GNU Affero General Public License version 3 (see the file LICENSE).
>
> """Email notifications for code review comments."""
> @@ -26,6 +26,7 @@
> append_footer,
> format_address,
> )
> +from lp.services.patches import parse_patches
> from lp.services.webapp import canonical_url
>
>
> @@ -169,18 +170,66 @@
> content, content_type=content_type, filename=filename)
>
>
> +def nlstrip(s):
> + """ strip a single trailing newline if one exists. """
> + if s[-1] == u'\n' or s[-1] == '\n': # handle unicode stuff here as well
s.endswith('\n')
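A minimal sketch of what I mean (endswith() works for both str and unicode, so the two separate comparisons aren't needed):

    def nlstrip(s):
        """Strip a single trailing newline if one exists."""
        if s.endswith('\n'):
            return s[:-1]
        return s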
> + return s[:-1]
> +
> + return s
> +
> +
> def build_inline_comments_section(comments, diff_text):
> """Return a formatted text section with contextualized comments."""
> - result_lines = []
> - diff_lines = diff_text.splitlines()
> - for num, line in enumerate(diff_lines, 1):
> - result_lines.append(u'> {0}'.format(line.decode('utf-8', 'replace')))
> - comment = comments.get(str(num))
> - if comment is not None:
> - result_lines.append('')
> - result_lines.extend(comment.splitlines())
> - result_lines.append('')
> -
> - result_text = u'\n'.join(result_lines)
> + hunk_code = []
> + lines = diff_text.splitlines(True)
> + patches = parse_patches(lines, allow_dirty=True)
> + line_count = 0
> + for patch in patches:
> + patch_code = []
> + patch_has_ic = False
> + headers = nlstrip(patch.get_header())
The nlstrip here silently removes a linefeed that may or may not exist. Is this compatible with our requirement of not skipping lines? How do we know whether a line was trimmed here, so we know whether to increment line_count?
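One illustrative option (just a sketch, not a request for this exact shape): have nlstrip report whether it actually trimmed something, so the caller can decide how to account for it in line_count:

    def nlstrip(s):
        """Strip a single trailing newline, reporting whether one was removed."""
        if s.endswith('\n'):
            return s[:-1], True
        return s, False

    # caller side
    headers, trimmed = nlstrip(patch.get_header())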
> + has_header_ic = False
> + header_code = []
> + for line in headers.split('\n'):
> + line_count += 1
> + comment = comments.get(str(line_count))
> + header_code.append(u'> {0}'.format(
> + line.decode('utf-8', 'replace')))
> + if comment is not None:
> + header_code.append('')
> + header_code.extend(comment.splitlines())
> + header_code.append('')
> + has_header_ic = True
> +
> + for p in patch.hunks:
> + this_hunk_code = []
> + has_ic = False
> + # This uses split('\n') due to the requirement of keeping the blank
> + # line after a hunk
> + diff_code = nlstrip(str(p)).split('\n')
Likewise with the nlstrip here.
> + for num, line in enumerate(diff_code, 1):
> + line_count += 1
> + this_hunk_code.append(u'> {0}'.format(
> + line.decode('utf-8', 'replace')))
> + comment = comments.get(str(line_count))
> + if comment is not None:
> + this_hunk_code.append('')
> + this_hunk_code.extend(comment.splitlines())
> + this_hunk_code.append('')
> + has_ic = True
> + if has_ic:
> + patch_code += this_hunk_code
> + patch_has_ic = True
> + else:
> + patch_code.append('')
Will this add an additional blank line for every hunk? E.g. if I skip three hunks I'll get "\n\n\n\n" rather than "\n\n", and we'll end up with walls of blank lines.
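Something along these lines (only a sketch) would collapse a run of skipped hunks into at most one blank separator:

    else:
        # keep a single blank separator no matter how many hunks we skip
        if patch_code and patch_code[-1] != '':
            patch_code.append('')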
> + if has_header_ic or patch_has_ic:
> + # Since we keep blank lines preceeding new patch lines
> + # (i.e. '=== ' or 'Binary ...') we need to strip the preceeding
> + # newlines when the first comment is not in the first patch.
> + if len(hunk_code) == 0 and header_code[0] == u'> ':
> + header_code = header_code[1:]
> + hunk_code += header_code + patch_code
I don't exactly understand the case you describe here. This seems like a bit of a hack.
> +
> + result_text = u'\n'.join(hunk_code)
>
> return '\n\nDiff comments:\n\n%s\n\n' % result_text
>
> === modified file 'lib/lp/code/mail/tests/test_codereviewcomment.py'
> --- lib/lp/code/mail/tests/test_codereviewcomment.py 2014-05-21 09:33:04 +0000
> +++ lib/lp/code/mail/tests/test_codereviewcomment.py 2014-07-16 22:44:34 +0000
> @@ -1,11 +1,12 @@
> -# Copyright 2009-2011 Canonical Ltd. This software is licensed under the
> +# Copyright 2009-2014 Canonical Ltd. This software is licensed under the
> # GNU Affero General Public License version 3 (see the file LICENSE).
>
> """Test CodeReviewComment emailing functionality."""
>
> -
> +import os
There should be a newline here, after the standard library imports.
> import testtools
> import transaction
> +
There should be no newline in the middle of the third-party imports. utilities/format-new-and-modified-imports will fix both of these issues.
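Roughly the grouping I'd expect after running it (sketch of the import block only):

    import os

    import testtools
    import transaction
    from zope.component import getUtility
    from zope.security.proxy import removeSecurityProxy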
> from zope.component import getUtility
> from zope.security.proxy import removeSecurityProxy
>
> @@ -15,6 +16,7 @@
> CodeReviewVote,
> )
> from lp.code.mail.codereviewcomment import (
> + nlstrip,
> build_inline_comments_section,
> CodeReviewCommentMailer,
Not alphabetically sorted.
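Presumably it should read:

    from lp.code.mail.codereviewcomment import (
        build_inline_comments_section,
        CodeReviewCommentMailer,
        nlstrip,
        )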
> )
> @@ -31,6 +33,12 @@
> from lp.testing.layers import LaunchpadFunctionalLayer
>
>
> +def datafile(filename):
> + data_path = os.path.join(os.path.dirname(__file__),
> + "../../../services/tests/data", filename)
> + return file(data_path, "rb")
> +
> +
> class TestCodeReviewComment(TestCaseWithFactory):
> """Test that comments are generated as expected."""
>
> @@ -214,14 +222,39 @@
> self.assertEqual(ctrl.body.splitlines()[2:-3],
> mailer.message.text_contents.splitlines())
>
> + def test_nlstrip_no_newline(self):
> + string = 'foobar'
> + self.assertEqual(string, nlstrip(string))
> +
> + def test_nlstrip_one_newline(self):
> + string = 'foobar\n'
> + self.assertEqual('foobar', nlstrip(string))
> +
> + def test_nlstrip_two_newlines(self):
> + string = 'foobar\n\n'
> + self.assertEqual('foobar\n', nlstrip(string))
> +
> + def test_nlstrip_unicode_no_newline(self):
> + string = u'foobar'
> + self.assertEqual(string, nlstrip(string))
> +
> + def test_nlstrip_unicode_one_newline(self):
> + string = u'foobar\n'
> + self.assertEqual(u'foobar', nlstrip(string))
> +
> + def test_nlstrip_unicode_two_newlines(self):
> + string = u'foobar\n\n'
> + self.assertEqual(u'foobar\n', nlstrip(string))
> +
> def makeCommentWithInlineComments(self, subject=None, content=None,
> - inline_comments=None):
> + inline_comments=None, diff_text=None):
> """Create a `CodeReviewComment` with inline (diff) comments."""
> bmp = self.factory.makeBranchMergeProposal()
> bmp.source_branch.subscribe(bmp.registrant,
> BranchSubscriptionNotificationLevel.NOEMAIL, None,
> CodeReviewNotificationLevel.FULL, bmp.registrant)
> - previewdiff = self.factory.makePreviewDiff(merge_proposal=bmp)
> + previewdiff = self.factory.makePreviewDiff(merge_proposal=bmp,
> + diff_text=diff_text)
> transaction.commit()
> if subject is None:
> subject = 'A comment'
> @@ -235,14 +268,185 @@
> inline_comments=inline_comments)
> return comment
>
> - def test_generateEmailWithInlineComments(self):
> - """Review comments emails consider the inline comments.
> -
> - See `build_inline_comments_section` tests for formatting details.
> - """
> - # Enabled corresponding feature flag.
> - self.useContext(feature_flags())
> - set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + # For the test_inline_comments_email_* tests, please see
> + # `build_inline_comments_section` tests for formatting details.
> +
> + def test_inline_comments_email_text(self):
> + # Testing that a single file with three hunks is parsed properly and
> + # only the header/hunk combinations with comments are provided.
> +
> + # Enabled corresponding feature flag.
> + self.useContext(feature_flags())
> + set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + diff = datafile("diff-1").read()
> + expected = datafile("expected-1").read().decode().splitlines()
> +
> + comment = self.makeCommentWithInlineComments(
> + inline_comments={'9': u'Cool',
> + '27': u'Are you sure?'},
> + diff_text=diff)
> + mailer = CodeReviewCommentMailer.forCreation(comment)
> + commenter = comment.branch_merge_proposal.registrant
> + ctrl = mailer.generateEmail(
> + commenter.preferredemail.email, commenter)
> +
> + self.assertEqual(expected, ctrl.body.splitlines()[:-2])
> +
> + def test_inline_comments_email_text_binary(self):
> + # Testing that a single file with three hunks and a binary file is
> + # parsed properly and only the binary header with comments are provided
> +
> + # Enabled corresponding feature flag.
> + self.useContext(feature_flags())
> + set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + diff_1 = datafile("diff-1").read()
> + diff_2 = datafile("diff-2").read()
> + diff = u'\n'.join([diff_1, diff_2])
> + expected = datafile("expected-2").read().decode().splitlines()
> +
> + comment = self.makeCommentWithInlineComments(
> + inline_comments={'33': u'Why are you adding this?'},
> + diff_text=diff)
> + mailer = CodeReviewCommentMailer.forCreation(comment)
> + commenter = comment.branch_merge_proposal.registrant
> + ctrl = mailer.generateEmail(
> + commenter.preferredemail.email, commenter)
> +
> + self.assertEqual(expected, ctrl.body.splitlines()[:-2])
> +
> + def test_inline_comments_email_binary(self):
> + # Testing that a binary file is parsed properly and only the binary
> + # header with comments are provided
> +
> + # Enabled corresponding feature flag.
> + self.useContext(feature_flags())
> + set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + diff = datafile("diff-2").read()
> + expected = datafile("expected-2").read().decode().splitlines()
> +
> + comment = self.makeCommentWithInlineComments(
> + inline_comments={'2': u'Why are you adding this?'},
> + diff_text=diff)
> + mailer = CodeReviewCommentMailer.forCreation(comment)
> + commenter = comment.branch_merge_proposal.registrant
> + ctrl = mailer.generateEmail(
> + commenter.preferredemail.email, commenter)
> +
> + self.assertEqual(expected, ctrl.body.splitlines()[:-2])
> +
> + def test_inline_comments_email_text_binary_text(self):
> + # Testing that a single file with three hunks, a binary file and a
> + # text file is parsed properly and only the binary header with comments
> + # are provided
> +
> + # Enabled corresponding feature flag.
> + self.useContext(feature_flags())
> + set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + diff_1 = datafile("diff-1").read()
> + diff_2 = datafile("diff-2").read()
> + diff_3 = datafile("diff-3").read()
> + diff = u'\n'.join([diff_1, diff_2, diff_3])
> + expected = datafile("expected-3").read().decode().splitlines()
> +
> + comment = self.makeCommentWithInlineComments(
> + inline_comments={'43': u'What is this?'},
> + diff_text=diff)
> + mailer = CodeReviewCommentMailer.forCreation(comment)
> + commenter = comment.branch_merge_proposal.registrant
> + ctrl = mailer.generateEmail(
> + commenter.preferredemail.email, commenter)
> +
> + self.assertEqual(expected, ctrl.body.splitlines()[:-2])
> +
> + def test_inline_comments_email_text_binary_text_includes_two_files(self):
> + # Testing that a single file with three hunks, a binary file and a
> + # text file is parsed properly and a single hunk from the first file
> + # plus a single hunk from the second file with comments are provided
> +
> + # Enabled corresponding feature flag.
> + self.useContext(feature_flags())
> + set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + diff_1 = datafile("diff-1").read()
> + diff_2 = datafile("diff-2").read()
> + diff_3 = datafile("diff-3").read()
> + diff = u'\n'.join([diff_1, diff_2, diff_3])
> + expected = datafile("expected-5").read().decode().splitlines()
> +
> + comment = self.makeCommentWithInlineComments(
> + inline_comments={'18': u'Why?',
> + '43': u'Good catch!'},
> + diff_text=diff)
> + mailer = CodeReviewCommentMailer.forCreation(comment)
> + commenter = comment.branch_merge_proposal.registrant
> + ctrl = mailer.generateEmail(
> + commenter.preferredemail.email, commenter)
> +
> + self.assertEqual(expected, ctrl.body.splitlines()[:-2])
> +
> + def test_inline_comments_email_text_binary_text_includes_three_files(self):
> + # Testing that a single file with three hunks, a binary file and a
> + # text file is parsed properly and two hunks from the first file,
> + # the second file and a single hunk from the second file with comments
> + # are provided
> +
> + # Enabled corresponding feature flag.
> + self.useContext(feature_flags())
> + set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + diff_1 = datafile("diff-1").read()
> + diff_2 = datafile("diff-2").read()
> + diff_3 = datafile("diff-3").read()
> + diff = u'\n'.join([diff_1, diff_2, diff_3])
> + expected = datafile("expected-6").read().decode().splitlines()
> +
> + comment = self.makeCommentWithInlineComments(
> + inline_comments={'9': u'Good stuff',
> + '27': u'+1',
> + '33': u'Do we need this?',
> + '43': u'Can we change this?'},
> + diff_text=diff)
> + mailer = CodeReviewCommentMailer.forCreation(comment)
> + commenter = comment.branch_merge_proposal.registrant
> + ctrl = mailer.generateEmail(
> + commenter.preferredemail.email, commenter)
> +
> + self.assertEqual(expected, ctrl.body.splitlines()[:-2])
> +
> + def test_inline_comments_email_text_binary_text_no_nl_files(self):
> + # Testing that a single file with three hunks, a binary file and a
> + # text file which do not have a new line between each other is parsed
> + # properly and two hunks from the first file, the second file and a
> + # single hunk from the second file with comments are provided
> +
> + # Enabled corresponding feature flag.
> + self.useContext(feature_flags())
> + set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + diff_1 = datafile("diff-1").read()
> + diff_2 = datafile("diff-2").read()
> + diff_3 = datafile("diff-3").read()
> + diff = u''.join([diff_1, diff_2, diff_3])
> + expected = datafile("expected-7").read().decode().splitlines()
> +
> + comment = self.makeCommentWithInlineComments(
> + inline_comments={'9': u'Good stuff',
> + '27': u'+1',
> + '32': u'Do we need this?',
> + '41': u'Can we change this?'},
> + diff_text=diff)
> + mailer = CodeReviewCommentMailer.forCreation(comment)
> + commenter = comment.branch_merge_proposal.registrant
> + ctrl = mailer.generateEmail(
> + commenter.preferredemail.email, commenter)
> +
> + self.assertEqual(expected, ctrl.body.splitlines()[:-2])
> +
> + def test_inline_comments_email_text_comment_in_header(self):
> + # Testing that a single file with comments in the header is parsed
> + # properly and only the headers is provided
> +
> + # Enabled corresponding feature flag.
> + self.useContext(feature_flags())
> + set_feature_flag(u'code.inline_diff_comments.enabled', u'enabled')
> + expected = datafile("expected-4").read().decode('utf-8').splitlines()
>
> comment = self.makeCommentWithInlineComments(
> inline_comments={'2': u'Is this from Planet Earth\xa9 ?'})
> @@ -251,20 +455,7 @@
> ctrl = mailer.generateEmail(
> commenter.preferredemail.email, commenter)
>
> - expected_lines = [
> - '',
> - 'Diff comments:',
> - '',
> - "> === zbqvsvrq svyr 'yvo/yc/pbqr/vagresnprf/qvss.cl'",
> - '> --- yvo/yc/pbqr/vagresnprf/qvss.cl '
> - '2009-10-01 13:25:12 +0000',
> - '',
> - u'Is this from Planet Earth\xa9 ?',
> - '',
> - '> +++ yvo/yc/pbqr/vagresnprf/qvss.cl '
> - '2010-02-02 15:48:56 +0000'
> - ]
> - self.assertEqual(expected_lines, ctrl.body.splitlines()[1:10])
> + self.assertEqual(expected, ctrl.body.splitlines()[:-2])
>
> def test_generateEmailWithInlineComments_feature_disabled(self):
> """Inline comments are not considered if the flag is not enabled."""
>
> === added file 'lib/lp/services/patches.py'
> --- lib/lp/services/patches.py 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/patches.py 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,177 @@
> +# Copyright 2014 Canonical Ltd. This software is licensed under the
> +# GNU Affero General Public License version 3 (see the file LICENSE).
> +
> +# This file is derived from bzrlib.patches in order to extend some
> +# functionality for use by the Launchpad inline comment feature.
> +
> +# In order to reduce the size of a diff being emailed with inline comments
> +# to just the relevant hunks of code, it was needed to break the diff down
> +# and then put the required pieces back together. To do this, the notion of
> +# 'comments' was added to Patch and BinaryPatch in order to return any lines
> +# beginning with '===' which is later added to the file headers and the line
> +# counted.
> +
> +import re
> +from bzrlib.patches import (
> + iter_lines_handle_nl,
> + iter_hunks,
> + binary_files_re,
> + )
> +from bzrlib.errors import (
> + BzrError,
> + MalformedPatchHeader,
> + PatchSyntax,
> + )
> +
> +regex = re.compile(binary_files_re)
> +
> +
> +class BinaryFiles(BzrError):
> +
> + _fmt = 'Binary files section encountered.'
> +
> + def __init__(self, comments, orig_name, mod_name):
> + self.comments = comments
> + self.orig_name = orig_name
> + self.mod_name = mod_name
> +
> +
> +class BinaryPatch(object):
> + def __init__(self, comments, oldname, newname):
> + self.oldname = oldname
> + self.newname = newname
> + self.comments = comments
> + self.hunks = []
> +
> + def __str__(self):
> + return self.get_header()
> +
> + def get_header(self):
> + header = 'Binary files %s and %s differ\n' % (
> + self.oldname, self.newname)
> + comments = ''.join(self.comments + [header])
> + return "%s" % comments
> +
> +
> +class Patch(BinaryPatch):
> +
> + def __init__(self, comments, oldname, newname):
> + BinaryPatch.__init__(self, comments, oldname, newname)
> + self.hunks = []
> + self.comments = comments
> +
> + def __str__(self):
> + ret = self.get_header()
> + ret += "".join([str(h) for h in self.hunks])
> + return ret
> +
> + def get_header(self):
> + comments = ''.join(self.comments)
> + return "%s--- %s\n+++ %s\n" % (comments, self.oldname, self.newname)
> +
> +
> +def get_patch_names(iter_lines):
> + comments = []
> + line = iter_lines.next()
> + while ((line == "\n") or line.startswith('=== ')
> + or line.startswith('*** ') or line.startswith('#')):
> + comments.append(line)
> + line = iter_lines.next()
> + try:
> + match = re.match(binary_files_re, line)
> + if match is not None:
> + raise BinaryFiles(comments, match.group(1), match.group(2))
> + if not line.startswith("--- "):
> + raise MalformedPatchHeader("No orig name", line)
> + else:
> + orig_name = line[4:].rstrip("\n")
> + except StopIteration:
> + raise MalformedPatchHeader("No orig line", "")
> + try:
> + line = iter_lines.next()
> + if not line.startswith("+++ "):
> + raise PatchSyntax("No mod name")
> + else:
> + mod_name = line[4:].rstrip("\n")
> + except StopIteration:
> + raise MalformedPatchHeader("No mod line", "")
> + return (comments, orig_name, mod_name)
> +
> +
> +def iter_file_patch(iter_lines, allow_dirty=False):
> + '''
> + :arg iter_lines: iterable of lines to parse for patches
> + :kwarg allow_dirty: If True, allow comments and other non-patch text
> + before the first patch. Note that the algorithm here can only find
> + such text before any patches have been found. Comments after the
> + first patch are stripped away in iter_hunks() if it is also passed
> + allow_dirty=True. Default False.
> + '''
> + ### FIXME: Docstring is not quite true. We allow certain comments no
> + # matter what, If they startwith '===', '***', or '#' Someone should
> + # reexamine this logic and decide if we should include those in
> + # allow_dirty or restrict those to only being before the patch is found
> + # (as allow_dirty does).
> + saved_lines = []
> + keeper = []
> + for line in iter_lines:
> + # if it's a === or binary it's a new patch, but we might have
> + # both together.
> + if line.startswith('=== '):
> + # assume a new patch until we know differently
> + if len(saved_lines) > 0:
> + if saved_lines[-1] == '\n':
> + # keep any preceeding newlines
> + keeper.append(saved_lines[-1])
> + saved_lines = saved_lines[:-1]
> + yield saved_lines
> + saved_lines = []
> + keeper.append(line)
> + elif regex.match(line):
> + # check for preceeding '=== ' line
> + # a binary patch always yields the previous patch
> + if len(saved_lines) > 0:
> + saved_lines.append(line)
> + else:
> + yield [line]
> + else:
> + saved_lines.append(line)
> + if len(keeper) > 0:
> + saved_lines += keeper
> + keeper = []
> + if len(saved_lines) > 0:
> + yield saved_lines
> +
> +
> +def parse_patch(iter_lines, allow_dirty=False):
> + '''
> + :arg iter_lines: iterable of lines to parse
> + :kwarg allow_dirty: If True, allow the patch to have trailing junk.
> + Default False
> + '''
> + iter_lines = iter_lines_handle_nl(iter_lines)
> + try:
> + (comments, orig_name, mod_name) = get_patch_names(iter_lines)
> + except BinaryFiles, e:
> + return BinaryPatch(e.comments, e.orig_name, e.mod_name)
> + else:
> + patch = Patch(comments, orig_name, mod_name)
> + patch.comments = comments
> + for hunk in iter_hunks(iter_lines, allow_dirty):
> + patch.hunks.append(hunk)
> + return patch
> +
> +
> +def parse_patches(iter_lines, allow_dirty=False):
> + '''
> + :arg iter_lines: iterable of lines to parse for patches
> + :kwarg allow_dirty: If True, allow text that's not part of the patch at
> + selected places. This includes comments before and after a patch
> + for instance. Default False.
> + '''
> + patches = []
> + for f in iter_file_patch(iter_lines, allow_dirty):
> + patch = parse_patch(f.__iter__(), allow_dirty)
> + patches.append(patch)
> +
> + return patches
>
> === added directory 'lib/lp/services/tests/data'
> === added file 'lib/lp/services/tests/data/diff-1'
> --- lib/lp/services/tests/data/diff-1 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/diff-1 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,30 @@
> +=== zbqvsvrq svyr 'yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf'
> +--- yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf 2014-07-08 12:32:44 +0000
> ++++ yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf 2014-07-08 23:47:16 +0000
> +@@ -70,7 +70,7 @@
> + "jf.bc=trgVayvarPbzzragf&cerivrjqvss_vq=1",
> + zbpxvb.erdhrfgf[0].pbasvt.qngn);
> + zbpxvb.ynfg_erdhrfg = zbpxvb.erdhrfgf[0];
> +- ine abj = (arj Qngr).inyhrBs()
> ++ ine abj = (arj Qngr).inyhrBs();
> + choyvfurq_pbzzragf = [
> + {'yvar_ahzore': '2',
> + 'crefba': crefba_bow,
> +@@ -80,7 +80,7 @@
> + 'crefba': crefba_bow,
> + 'grkg': 'Guvf vf terng.',
> + 'qngr': (arj Qngr(abj - 12600000)),
> +- },
> ++ }
> + ];
> + zbpxvb.fhpprff({
> + erfcbafrGrkg: L.WFBA.fgevatvsl(choyvfurq_pbzzragf),
> +@@ -88,7 +88,7 @@
> +
> + // Choyvfurq pbzzrag vf qvfcynlrq.
> + ine svefg_pbzzragf = L.bar('#qvss-yvar-2').arkg().bar('qvi');
> +- ine svefg = svefg_pbzzragf.bar('qvi:svefg-puvyq')
> ++ ine svefg = svefg_pbzzragf.bar('qvi:svefg-puvyq');
> + L.Nffreg.nerRdhny(
> + 'Sbb One (anzr16) jebgr ba 2012-08-12:',
> + svefg.bar('.obneqPbzzragQrgnvyf').trg('grkg'));
>
> === added file 'lib/lp/services/tests/data/diff-2'
> --- lib/lp/services/tests/data/diff-2 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/diff-2 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,2 @@
> +=== nqqrq svyr 'jrohv/obql_ot.wct'
> +Binary files jrohv/obql_ot.wct1970-01-01 00:00:00 +0000 and jrohv/obql_ot.wct19702014-01-31 16:57:19 +0000 differ
>
> === added file 'lib/lp/services/tests/data/diff-3'
> --- lib/lp/services/tests/data/diff-3 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/diff-3 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,12 @@
> +=== zbqvsvrq svyr 'yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf'
> +--- yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf 2014-07-08 12:32:44 +0000
> ++++ yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf 2014-07-08 23:47:16 +0000
> +@@ -3,7 +3,7 @@
> + ine grfgf = L.anzrfcnpr('qngr.grfg');
> + grfgf.fhvgr = arj L.Grfg.Fhvgr("qngr grfgf");
> +
> +- ine abj = (arj Qngr).inyhrBs()
> ++ ine abj = (arj Qngr).inyhrBs();
> + grfgf.fhvgr.nqq(arj L.Grfg.Pnfr({
> + anzr: 'grfg_nccebkvzngrqngr',
> +
>
> === added file 'lib/lp/services/tests/data/expected-1'
> --- lib/lp/services/tests/data/expected-1 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/expected-1 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,35 @@
> +
> +
> +Diff comments:
> +
> +> === zbqvsvrq svyr 'yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf'
> +> --- yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf 2014-07-08 12:32:44 +0000
> +> +++ yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf 2014-07-08 23:47:16 +0000
> +> @@ -70,7 +70,7 @@
> +> "jf.bc=trgVayvarPbzzragf&cerivrjqvss_vq=1",
> +> zbpxvb.erdhrfgf[0].pbasvt.qngn);
> +> zbpxvb.ynfg_erdhrfg = zbpxvb.erdhrfgf[0];
> +> - ine abj = (arj Qngr).inyhrBs()
> +> + ine abj = (arj Qngr).inyhrBs();
> +
> +Cool
> +
> +> choyvfurq_pbzzragf = [
> +> {'yvar_ahzore': '2',
> +> 'crefba': crefba_bow,
> +
> +> @@ -88,7 +88,7 @@
> +>
> +> // Choyvfurq pbzzrag vf qvfcynlrq.
> +> ine svefg_pbzzragf = L.bar('#qvss-yvar-2').arkg().bar('qvi');
> +> - ine svefg = svefg_pbzzragf.bar('qvi:svefg-puvyq')
> +> + ine svefg = svefg_pbzzragf.bar('qvi:svefg-puvyq');
> +
> +Are you sure?
> +
> +> L.Nffreg.nerRdhny(
> +> 'Sbb One (anzr16) jebgr ba 2012-08-12:',
> +> svefg.bar('.obneqPbzzragQrgnvyf').trg('grkg'));
> +
> +
> +--
>
> === added file 'lib/lp/services/tests/data/expected-2'
> --- lib/lp/services/tests/data/expected-2 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/expected-2 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,12 @@
> +
> +
> +Diff comments:
> +
> +> === nqqrq svyr 'jrohv/obql_ot.wct'
> +> Binary files jrohv/obql_ot.wct1970-01-01 00:00:00 +0000 and jrohv/obql_ot.wct19702014-01-31 16:57:19 +0000 differ
> +
> +Why are you adding this?
> +
> +
> +
> +--
>
> === added file 'lib/lp/services/tests/data/expected-3'
> --- lib/lp/services/tests/data/expected-3 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/expected-3 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,22 @@
> +
> +
> +Diff comments:
> +
> +> === zbqvsvrq svyr 'yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf'
> +> --- yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf 2014-07-08 12:32:44 +0000
> +> +++ yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf 2014-07-08 23:47:16 +0000
> +> @@ -3,7 +3,7 @@
> +> ine grfgf = L.anzrfcnpr('qngr.grfg');
> +> grfgf.fhvgr = arj L.Grfg.Fhvgr("qngr grfgf");
> +>
> +> - ine abj = (arj Qngr).inyhrBs()
> +> + ine abj = (arj Qngr).inyhrBs();
> +
> +What is this?
> +
> +> grfgf.fhvgr.nqq(arj L.Grfg.Pnfr({
> +> anzr: 'grfg_nccebkvzngrqngr',
> +>
> +
> +
> +--
>
> === added file 'lib/lp/services/tests/data/expected-4'
> --- lib/lp/services/tests/data/expected-4 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/expected-4 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,14 @@
> +
> +
> +Diff comments:
> +
> +> === zbqvsvrq svyr 'yvo/yc/pbqr/vagresnprf/qvss.cl'
> +> --- yvo/yc/pbqr/vagresnprf/qvss.cl 2009-10-01 13:25:12 +0000
> +
> +Is this from Planet Earth© ?
> +
> +> +++ yvo/yc/pbqr/vagresnprf/qvss.cl 2010-02-02 15:48:56 +0000
> +
> +
> +
> +--
>
> === added file 'lib/lp/services/tests/data/expected-5'
> --- lib/lp/services/tests/data/expected-5 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/expected-5 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,40 @@
> +
> +
> +Diff comments:
> +
> +> === zbqvsvrq svyr 'yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf'
> +> --- yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf 2014-07-08 12:32:44 +0000
> +> +++ yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf 2014-07-08 23:47:16 +0000
> +
> +> @@ -80,7 +80,7 @@
> +> 'crefba': crefba_bow,
> +> 'grkg': 'Guvf vf terng.',
> +> 'qngr': (arj Qngr(abj - 12600000)),
> +> - },
> +> + }
> +
> +Why?
> +
> +> ];
> +> zbpxvb.fhpprff({
> +> erfcbafrGrkg: L.WFBA.fgevatvsl(choyvfurq_pbzzragf),
> +
> +>
> +> === zbqvsvrq svyr 'yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf'
> +> --- yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf 2014-07-08 12:32:44 +0000
> +> +++ yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf 2014-07-08 23:47:16 +0000
> +> @@ -3,7 +3,7 @@
> +> ine grfgf = L.anzrfcnpr('qngr.grfg');
> +> grfgf.fhvgr = arj L.Grfg.Fhvgr("qngr grfgf");
> +>
> +> - ine abj = (arj Qngr).inyhrBs()
> +> + ine abj = (arj Qngr).inyhrBs();
> +
> +Good catch!
> +
> +> grfgf.fhvgr.nqq(arj L.Grfg.Pnfr({
> +> anzr: 'grfg_nccebkvzngrqngr',
> +>
> +
> +
> +--
>
> === added file 'lib/lp/services/tests/data/expected-6'
> --- lib/lp/services/tests/data/expected-6 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/expected-6 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,57 @@
> +
> +
> +Diff comments:
> +
> +> === zbqvsvrq svyr 'yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf'
> +> --- yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf 2014-07-08 12:32:44 +0000
> +> +++ yvo/yc/pbqr/wninfpevcg/grfgf/grfg_oenapuzretrcebcbfny.vayvarpbzzragf.wf 2014-07-08 23:47:16 +0000
> +> @@ -70,7 +70,7 @@
> +> "jf.bc=trgVayvarPbzzragf&cerivrjqvss_vq=1",
> +> zbpxvb.erdhrfgf[0].pbasvt.qngn);
> +> zbpxvb.ynfg_erdhrfg = zbpxvb.erdhrfgf[0];
> +> - ine abj = (arj Qngr).inyhrBs()
> +> + ine abj = (arj Qngr).inyhrBs();
> +
> +Good stuff
> +
> +> choyvfurq_pbzzragf = [
> +> {'yvar_ahzore': '2',
> +> 'crefba': crefba_bow,
> +
> +> @@ -88,7 +88,7 @@
> +>
> +> // Choyvfurq pbzzrag vf qvfcynlrq.
> +> ine svefg_pbzzragf = L.bar('#qvss-yvar-2').arkg().bar('qvi');
> +> - ine svefg = svefg_pbzzragf.bar('qvi:svefg-puvyq')
> +> + ine svefg = svefg_pbzzragf.bar('qvi:svefg-puvyq');
> +
> ++1
> +
> +> L.Nffreg.nerRdhny(
> +> 'Sbb One (anzr16) jebgr ba 2012-08-12:',
> +> svefg.bar('.obneqPbzzragQrgnvyf').trg('grkg'));
> +>
> +> === nqqrq svyr 'jrohv/obql_ot.wct'
> +> Binary files jrohv/obql_ot.wct1970-01-01 00:00:00 +0000 and jrohv/obql_ot.wct19702014-01-31 16:57:19 +0000 differ
> +
> +Do we need this?
> +
> +>
> +> === zbqvsvrq svyr 'yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf'
> +> --- yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf 2014-07-08 12:32:44 +0000
> +> +++ yvo/yc/ncc/wninfpevcg/grfgf/grfg_qngr.wf 2014-07-08 23:47:16 +0000
> +> @@ -3,7 +3,7 @@
> +> ine grfgf = L.anzrfcnpr('qngr.grfg');
> +> grfgf.fhvgr = arj L.Grfg.Fhvgr("qngr grfgf");
> +>
> +> - ine abj = (arj Qngr).inyhrBs()
> +> + ine abj = (arj Qngr).inyhrBs();
> +
> +Can we change this?
> +
> +> grfgf.fhvgr.nqq(arj L.Grfg.Pnfr({
> +> anzr: 'grfg_nccebkvzngrqngr',
> +>
> +
> +
> +--
>
> === added file 'lib/lp/services/tests/data/insert_top.patch'
> --- lib/lp/services/tests/data/insert_top.patch 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/insert_top.patch 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,7 @@
> +--- orig/pylon/patches.py
> ++++ mod/pylon/patches.py
> +@@ -1,3 +1,4 @@
> ++#test
> + import util
> + import sys
> + class PatchSyntax(Exception):
>
> === added file 'lib/lp/services/tests/data/tricky-diff.patch'
> --- lib/lp/services/tests/data/tricky-diff.patch 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/data/tricky-diff.patch 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,26 @@
> +=== orig-7
> +--- orig-7
> ++++ mod-7
> +@@ -1,10 +1,10 @@
> + -- a
> +--- b
> ++++ c
> + xx d
> + xx e
> + ++ f
> +-++ g
> ++-- h
> + xx i
> + xx j
> + -- k
> +--- l
> ++++ m
> +=== orig-8
> +--- orig-8
> ++++ mod-8
> +@@ -1 +1 @@
> +--- A
> ++++ B
> +@@ -1 +1 @@
> +--- C
> ++++ D
>
> === added file 'lib/lp/services/tests/test_patches.py'
> --- lib/lp/services/tests/test_patches.py 1970-01-01 00:00:00 +0000
> +++ lib/lp/services/tests/test_patches.py 2014-07-16 22:44:34 +0000
> @@ -0,0 +1,137 @@
> +# Copyright 2014 Canonical Ltd. This software is licensed under the
> +# GNU Affero General Public License version 3 (see the file LICENSE).
> +
> +import os
> +import re
> +
> +from bzrlib.patches import difference_index
> +from bzrlib.errors import MalformedPatchHeader
> +
> +from lp.services.patches import (
> + BinaryPatch,
> + Patch,
> + get_patch_names,
> + parse_patch,
> + parse_patches,
> + )
> +from lp.testing import TestCase
> +
> +
> +def datafile(filename):
> + data_path = os.path.join(os.path.dirname(__file__),
> + "data", filename)
> + return file(data_path, "rb")
> +
> +
> +class TestBzrLibCode(TestCase):
> + """Test code rewritten from bzrlib.patches for diff comment emails"""
> +
> + def assertContainsRe(self, haystack, needle_re, flags=0):
> + """Assert that a contains something matching a regular expression."""
> + if not re.search(needle_re, haystack, flags):
> + if '\n' in haystack or len(haystack) > 60:
> + # a long string, format it in a more readable way
> + raise AssertionError(
> + 'pattern "%s" not found in\n"""\\\n%s"""\n'
> + % (needle_re, haystack))
> + else:
> + raise AssertionError('pattern "%s" not found in "%s"'
> + % (needle_re, haystack))
> +
> + def data_lines(self, filename):
> + data = datafile(filename)
> + try:
> + return data.readlines()
> + finally:
> + data.close()
> +
> + def testValidPatchHeaderWithComments(self):
> + """Parse a valid patch header"""
> + expected_comments = ["=== modified file 'commands.py'"]
> + lines = ("=== modified file 'commands.py'\n--- orig/commands.py\n"
> + "+++ mod/dommands.py\n").split('\n')
> + (comments, orig, mod) = get_patch_names(lines.__iter__())
> + self.assertEqual(comments, expected_comments)
> + self.assertEqual(orig, "orig/commands.py")
> + self.assertEqual(mod, "mod/dommands.py")
> +
> + def testValidPatchHeader(self):
> + """Parse a valid patch header"""
> + expected_comments = []
> + lines = ("--- orig/commands.py\n+++ mod/dommands.py\n").split('\n')
> + (comments, orig, mod) = get_patch_names(lines.__iter__())
> + self.assertEqual(comments, expected_comments)
> + self.assertEqual(orig, "orig/commands.py")
> + self.assertEqual(mod, "mod/dommands.py")
> +
> + def testInvalidPatchHeader(self):
> + """Parse an invalid patch header"""
> + lines = "-- orig/commands.py\n+++ mod/dommands.py".split('\n')
> + self.assertRaises(MalformedPatchHeader, get_patch_names,
> + lines.__iter__())
> +
> + def compare_parsed(self, patchtext):
> + lines = patchtext.splitlines(True)
> + patch = parse_patch(lines.__iter__())
> + pstr = str(patch)
> + i = difference_index(patchtext, pstr)
> + if i is not None:
> + print "%i: \"%s\" != \"%s\"" % (i, patchtext[i], pstr[i])
> + self.assertEqual(patchtext, str(patch))
> +
> + def testAll(self):
> + """Test parsing a whole patch"""
> + patchtext = datafile("diff-1").read()
> + self.compare_parsed(patchtext)
> +
> + def test_parse_binary(self):
> + """Test parsing a whole patch"""
> + diff_1 = datafile("diff-1").read()
> + diff_2 = datafile("diff-2").read()
> + diff = '\n'.join([diff_2, diff_1]).splitlines(True)
> + patches = parse_patches(diff)
> + self.assertIs(BinaryPatch, patches[0].__class__)
> + self.assertIs(Patch, patches[1].__class__)
> + self.assertContainsRe(patches[0].oldname,
> + '^jrohv/obql_ot.wct1970-01-01')
> + self.assertContainsRe(patches[0].newname,
> + '^jrohv/obql_ot.wct19702014-01-31')
> + self.assertContainsRe(
> + str(patches[0]),
> + 'Binary files jrohv/obql_ot.wct1970-01-01.* and '
> + 'jrohv/obql_ot.wct19702014-01-31.* differ\n')
> +
> + def test_parse_binary_after_normal(self):
> + diff_1 = datafile("diff-1").read()
> + diff_2 = datafile("diff-2").read()
> + diff = '\n'.join([diff_1, diff_2]).splitlines(True)
> + patches = parse_patches(diff)
> + self.assertIs(BinaryPatch, patches[1].__class__)
> + self.assertIs(Patch, patches[0].__class__)
> + self.assertContainsRe(patches[1].oldname,
> + '^jrohv/obql_ot.wct1970-01-01')
> + self.assertContainsRe(patches[1].newname,
> + '^jrohv/obql_ot.wct19702014-01-31')
> + self.assertContainsRe(
> + str(patches[1]),
> + 'Binary files jrohv/obql_ot.wct1970-01-01.* and '
> + 'jrohv/obql_ot.wct19702014-01-31.* differ\n')
> +
> + def test_roundtrip_binary(self):
> + diff_1 = datafile("diff-1").read()
> + diff_2 = datafile("diff-2").read()
> + diff = '\n'.join([diff_2, diff_1])
> + patches = parse_patches(diff.splitlines(True))
> + self.assertEqual(diff, ''.join(str(p) for p in patches))
> +
> + def testParsePatchesWithComments(self):
> + """Make sure file names can be extracted from tricky unified diffs
> + with comment lines"""
> + patchtext = ''.join(self.data_lines("tricky-diff.patch"))
> + filenames = [('orig-7', 'mod-7'),
> + ('orig-8', 'mod-8')]
> + patches = parse_patches(patchtext.splitlines(True))
> + patch_files = []
> + for patch in patches:
> + patch_files.append((patch.oldname, patch.newname))
> + self.assertEqual(patch_files, filenames)
>
> === modified file 'lib/lp/testing/factory.py'
> --- lib/lp/testing/factory.py 2014-07-08 06:34:13 +0000
> +++ lib/lp/testing/factory.py 2014-07-16 22:44:34 +0000
> @@ -1540,8 +1540,11 @@
> Diff.fromFile(StringIO(diff_text), len(diff_text)))
>
> def makePreviewDiff(self, conflicts=u'', merge_proposal=None,
> - date_created=None):
> - diff = self.makeDiff()
> + date_created=None, diff_text=None):
> + if diff_text is None:
> + diff = self.makeDiff()
> + else:
> + diff = self.makeDiff(diff_text)
> if merge_proposal is None:
> merge_proposal = self.makeBranchMergeProposal()
> preview_diff = PreviewDiff()
>
--
https://code.launchpad.net/~cjohnston/launchpad/short-ic-emails/+merge/225095
Your team Launchpad code reviewers is subscribed to branch lp:launchpad.