launchpad-reviewers team mailing list archive

[Merge] lp:~jml/launchpad/split-mail-and-summary into lp:launchpad/devel

 

Jonathan Lange has proposed merging lp:~jml/launchpad/split-mail-and-summary into lp:launchpad/devel.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)


Hello,

This branch changes the email sent by ec2 test. Rather than being a mere lump of whatever we happened to collect as the summary, it's now a custom-built message derived from the test results.

Examples:
  * http://paste.ubuntu.com/497557/
  * http://paste.ubuntu.com/497555/

Implementation-wise, the main trick is to change LaunchpadTester, the object responsible for running the tests and gathering results, to send its TestResult object back to the logger rather than a simple boolean. The logger was then changed to compose the result email using the request's new format_result method, rather than simply dumping the summary.
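
For reference, here's a condensed sketch of the new flow (names and logic taken from the diff below; error handling and the PQM submission step are trimmed):

    # LaunchpadTester.run now hands the TestResult it gathered back to the
    # logger, rather than a bare pass/fail boolean.
    result = self._gather_test_output(popen.stdout, self._logger)
    self._logger.got_result(result)

    # WebTestLogger.got_result then composes the email body from that result
    # via the request's new format_result method, instead of dumping the
    # summary file verbatim.
    def got_result(self, result):
        self._end_time = datetime.datetime.utcnow()
        if self._request.wants_email:
            email_text = self._request.format_result(
                result, self._start_time, self._end_time)
            full_log_gz = gzip_data(self.get_full_log_contents())
            self._request.send_report_email(
                result.wasSuccessful(), email_text, full_log_gz)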

The details:
  * If the test child process exits with a non-zero code but no tests failed, we raise an error, since it's likely to be something weird (see the sketch after this list).

  * Included in the diff are changes from a merge proposal that I should have applied before landing my previous branch. Mostly stylistic.

  * Some tests were incorrect. They used a dictionary to fake an email message, but had the subject stored under the wrong key ("Subject:" rather than "Subject").
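
To make the first point above concrete, the check is roughly this (simplified from the diff):

    retcode = popen.wait()
    # A non-zero exit from the child combined with a clean TestResult means
    # the test runner itself blew up rather than a test failing, so raise
    # loudly instead of reporting a quiet success.
    if result.wasSuccessful() and retcode:
        raise NonZeroExitCode(retcode)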

I look forward to your review.

jml
-- 
https://code.launchpad.net/~jml/launchpad/split-mail-and-summary/+merge/36111
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~jml/launchpad/split-mail-and-summary into lp:launchpad/devel.
=== modified file 'lib/devscripts/ec2test/remote.py'
--- lib/devscripts/ec2test/remote.py	2010-08-27 14:27:22 +0000
+++ lib/devscripts/ec2test/remote.py	2010-09-21 08:20:23 +0000
@@ -28,6 +28,7 @@
 import optparse
 import os
 import pickle
+from StringIO import StringIO
 import subprocess
 import sys
 import tempfile
@@ -49,6 +50,15 @@
 import subunit
 
 
+class NonZeroExitCode(Exception):
+    """Raised when the child process exits with a non-zero exit code."""
+
+    def __init__(self, retcode):
+        super(NonZeroExitCode, self).__init__(
+            'Test process died with exit code %r, but no tests failed.'
+            % (retcode,))
+
+
 class SummaryResult(unittest.TestResult):
     """Test result object used to generate the summary."""
 
@@ -271,12 +281,16 @@
         self._logger.prepare()
         try:
             popen = self._spawn_test_process()
-            self._gather_test_output(popen.stdout, self._logger)
-            exit_status = popen.wait()
+            result = self._gather_test_output(popen.stdout, self._logger)
+            retcode = popen.wait()
+            # The process could have an error not indicated by an actual test
+            # result nor by a raised exception
+            if result.wasSuccessful() and retcode:
+                raise NonZeroExitCode(retcode)
         except:
             self._logger.error_in_testrunner(sys.exc_info())
         else:
-            self._logger.got_result(not exit_status)
+            self._logger.got_result(result)
 
     def _gather_test_output(self, input_stream, logger):
         """Write the testrunner output to the logs."""
@@ -287,6 +301,7 @@
             subunit_server.lineReceived(line)
             logger.got_line(line)
             summary_stream.flush()
+        return result
 
 
 class Request:
@@ -327,6 +342,57 @@
         """Actually send 'message'."""
         self._smtp_connection.send_email(message)
 
+    def _format_test_list(self, header, tests):
+        if not tests:
+            return []
+        tests = ['  ' + test.id() for test, error in tests]
+        return [header, '-' * len(header)] + tests + ['']
+
+    def format_result(self, result, start_time, end_time):
+        duration = end_time - start_time
+        output = [
+            'Tests started at approximately %s' % start_time,
+            ]
+        source = self.get_source_details()
+        if source:
+            output.append('Source: %s r%s' % source)
+        target = self.get_target_details()
+        if target:
+            output.append('Target: %s r%s' % target)
+        output.extend([
+            '',
+            '%s tests run in %s, %s failures, %s errors' % (
+                result.testsRun, duration, len(result.failures),
+                len(result.errors)),
+            '',
+            ])
+
+        bad_tests = (
+            self._format_test_list('Failing tests', result.failures) +
+            self._format_test_list('Tests with errors', result.errors))
+        output.extend(bad_tests)
+
+        if bad_tests:
+            full_error_stream = StringIO()
+            copy_result = SummaryResult(full_error_stream)
+            for test, error in result.failures:
+                full_error_stream.write(
+                    copy_result._formatError('FAILURE', test, error))
+            for test, error in result.errors:
+                full_error_stream.write(
+                    copy_result._formatError('ERROR', test, error))
+            output.append(full_error_stream.getvalue())
+
+        subject = self._get_pqm_subject()
+        if subject:
+            if result.wasSuccessful():
+                output.append('SUBMITTED TO PQM:')
+            else:
+                output.append('**NOT** submitted to PQM:')
+            output.extend([subject, ''])
+        output.extend(['(See the attached file for the complete log)', ''])
+        return '\n'.join(output)
+
     def get_target_details(self):
         """Return (branch_url, revno) for trunk."""
         branch = bzrlib.branch.Branch.open(self._local_branch_path)
@@ -449,12 +515,15 @@
                     continue
                 yield name, branch.get_parent(), branch.revno()
 
+    def _get_pqm_subject(self):
+        if not self._pqm_message:
+            return
+        return self._pqm_message.get('Subject')
+
     def submit_to_pqm(self, successful):
         """Submit this request to PQM, if successful & configured to do so."""
-        if not self._pqm_message:
-            return
-        subject = self._pqm_message.get('Subject')
-        if successful:
+        subject = self._get_pqm_subject()
+        if subject and successful:
             self._send_email(self._pqm_message)
         return subject
 
@@ -491,6 +560,9 @@
         self._index_filename = index_filename
         self._request = request
         self._echo_to_stdout = echo_to_stdout
+        # Actually set by prepare(), but setting to a dummy value to make
+        # testing easier.
+        self._start_time = datetime.datetime.utcnow()
 
     @classmethod
     def make_in_directory(cls, www_dir, request, echo_to_stdout):
@@ -558,16 +630,16 @@
             if e.errno == errno.ENOENT:
                 return ''
 
-    def got_result(self, successful):
+    def got_result(self, result):
         """The tests are done and the results are known."""
+        self._end_time = datetime.datetime.utcnow()
+        successful = result.wasSuccessful()
         self._handle_pqm_submission(successful)
         if self._request.wants_email:
-            self._write_to_filename(
-                self._summary_filename,
-                '\n(See the attached file for the complete log)\n')
-            summary = self.get_summary_contents()
+            email_text = self._request.format_result(
+                result, self._start_time, self._end_time)
             full_log_gz = gzip_data(self.get_full_log_contents())
-            self._request.send_report_email(successful, summary, full_log_gz)
+            self._request.send_report_email(successful, email_text, full_log_gz)
 
     def _handle_pqm_submission(self, successful):
         subject = self._request.submit_to_pqm(successful)
@@ -614,9 +686,9 @@
             return self._write_to_filename(
                 self._index_filename, textwrap.dedent(html))
 
+        self._start_time = datetime.datetime.utcnow()
         msg = 'Tests started at approximately %(now)s UTC' % {
-            'now': datetime.datetime.utcnow().strftime(
-                '%a, %d %b %Y %H:%M:%S')}
+            'now': self._start_time.strftime('%a, %d %b %Y %H:%M:%S')}
         add_to_html('''\
             <html>
               <head>

=== modified file 'lib/devscripts/ec2test/tests/test_remote.py'
--- lib/devscripts/ec2test/tests/test_remote.py	2010-08-27 14:27:22 +0000
+++ lib/devscripts/ec2test/tests/test_remote.py	2010-09-21 08:20:23 +0000
@@ -5,6 +5,8 @@
 
 __metaclass__ = type
 
+from datetime import datetime, timedelta
+import doctest
 from email.mime.application import MIMEApplication
 from email.mime.text import MIMEText
 import gzip
@@ -21,9 +23,10 @@
 from bzrlib.config import GlobalConfig
 from bzrlib.tests import TestCaseWithTransport
 
-from testtools import TestCase
+from testtools import TestCase, TestResult
 from testtools.content import Content
 from testtools.content_type import ContentType
+from testtools.matchers import DocTestMatches
 
 from devscripts.ec2test.remote import (
     EC2Runner,
@@ -75,7 +78,8 @@
         """Make a request to test, specifying only things we care about.
 
         Note that the returned request object will not ever send email, but
-        will instead log "sent" emails to `request.emails_sent`.
+        will instead append "sent" emails to the list provided here as
+        'emails_sent'.
         """
         if trunk is None:
             trunk = self.make_trunk()
@@ -109,8 +113,6 @@
     def make_tester(self, logger=None, test_directory=None, test_options=()):
         if not logger:
             logger = self.make_logger()
-        if not test_directory:
-            test_directory = 'unspecified-test-directory'
         return LaunchpadTester(logger, test_directory, test_options)
 
     def make_logger(self, request=None, echo_to_stdout=False):
@@ -472,14 +474,14 @@
     def test_submit_to_pqm_unsuccessful(self):
         # submit_to_pqm returns the subject of the PQM mail even if it's
         # handling a failed test run.
-        message = {'Subject:': 'My PQM message'}
+        message = {'Subject': 'My PQM message'}
         req = self.make_request(pqm_message=message)
         subject = req.submit_to_pqm(successful=False)
         self.assertIs(message.get('Subject'), subject)
 
     def test_submit_to_pqm_unsuccessful_no_email(self):
         # submit_to_pqm doesn't send any email if the run was unsuccessful.
-        message = {'Subject:': 'My PQM message'}
+        message = {'Subject': 'My PQM message'}
         log = []
         req = self.make_request(pqm_message=message, emails_sent=log)
         req.submit_to_pqm(successful=False)
@@ -487,7 +489,7 @@
 
     def test_submit_to_pqm_successful(self):
         # submit_to_pqm returns the subject of the PQM mail.
-        message = {'Subject:': 'My PQM message'}
+        message = {'Subject': 'My PQM message'}
         log = []
         req = self.make_request(pqm_message=message, emails_sent=log)
         subject = req.submit_to_pqm(successful=True)
@@ -558,6 +560,124 @@
                 self.assertEqual(
                     expected_part.get_payload(), observed_part.get_payload())
 
+    def test_format_result_success(self):
+        class SomeTest(TestCase):
+            def test_a(self):
+                pass
+            def test_b(self):
+                pass
+            def test_c(self):
+                pass
+        test = unittest.TestSuite(map(SomeTest, ['test_' + x for x in 'abc']))
+        result = TestResult()
+        test.run(result)
+        tree = self.make_trunk()
+        # Fake a merge, giving silly revision ids.
+        tree.add_pending_merge('foo', 'bar')
+        req = self.make_request(
+            branch_url='https://example.com/bzr/thing', revno=42, trunk=tree)
+        source_branch, source_revno = req.get_source_details()
+        target_branch, target_revno = req.get_target_details()
+        start_time = datetime.utcnow()
+        end_time = start_time + timedelta(hours=1)
+        data = {
+            'source_branch': source_branch,
+            'source_revno': source_revno,
+            'target_branch': target_branch,
+            'target_revno': target_revno,
+            'start_time': str(start_time),
+            'duration': str(end_time - start_time),
+            'num_tests': result.testsRun,
+            'num_failures': len(result.failures),
+            'num_errors': len(result.errors),
+            }
+        result_text = req.format_result(result, start_time, end_time)
+        self.assertThat(
+            result_text, DocTestMatches("""\
+Tests started at approximately %(start_time)s
+Source: %(source_branch)s r%(source_revno)s
+Target: %(target_branch)s r%(target_revno)s
+<BLANKLINE>
+%(num_tests)s tests run in %(duration)s, %(num_failures)s failures, %(num_errors)s errors
+<BLANKLINE>
+(See the attached file for the complete log)
+""" % data, doctest.REPORT_NDIFF | doctest.ELLIPSIS))
+
+    def test_format_result_with_errors(self):
+        class SomeTest(TestCase):
+            def test_ok(self):
+                pass
+            def test_fail(self):
+                self.fail("oh no")
+            def test_error(self):
+                1/0
+        fail_test = SomeTest('test_fail')
+        error_test = SomeTest('test_error')
+        test = unittest.TestSuite(
+            [fail_test, error_test, SomeTest('test_ok')])
+        result = TestResult()
+        test.run(result)
+        tree = self.make_trunk()
+        # Fake a merge, giving silly revision ids.
+        tree.add_pending_merge('foo', 'bar')
+        req = self.make_request(
+            branch_url='https://example.com/bzr/thing', revno=42, trunk=tree)
+        source_branch, source_revno = req.get_source_details()
+        target_branch, target_revno = req.get_target_details()
+        start_time = datetime.utcnow()
+        end_time = start_time + timedelta(hours=1)
+        data = {
+            'source_branch': source_branch,
+            'source_revno': source_revno,
+            'target_branch': target_branch,
+            'target_revno': target_revno,
+            'start_time': str(start_time),
+            'duration': str(end_time - start_time),
+            'fail_id': fail_test.id(),
+            'error_id': error_test.id(),
+            'num_tests': result.testsRun,
+            'num_failures': len(result.failures),
+            'num_errors': len(result.errors),
+            }
+        result_text = req.format_result(result, start_time, end_time)
+        self.assertThat(
+            result_text, DocTestMatches("""\
+Tests started at approximately %(start_time)s
+Source: %(source_branch)s r%(source_revno)s
+Target: %(target_branch)s r%(target_revno)s
+<BLANKLINE>
+%(num_tests)s tests run in %(duration)s, %(num_failures)s failures, %(num_errors)s errors
+<BLANKLINE>
+Failing tests
+-------------
+  %(fail_id)s
+<BLANKLINE>
+Tests with errors
+-----------------
+  %(error_id)s
+<BLANKLINE>
+======================================================================
+FAILURE: test_fail...
+----------------------------------------------------------------------
+Text attachment: traceback
+------------
+Traceback (most recent call last):
+...
+------------
+<BLANKLINE>
+======================================================================
+ERROR: test_error...
+----------------------------------------------------------------------
+Text attachment: traceback
+------------
+Traceback (most recent call last):
+...
+------------
+<BLANKLINE>
+<BLANKLINE>
+(See the attached file for the complete log)
+""" % data, doctest.REPORT_NDIFF | doctest.ELLIPSIS))
+
 
 class TestWebTestLogger(TestCaseWithTransport, RequestHelpers):
 
@@ -672,11 +792,13 @@
             False, "who-cares.pid", False, smtp_connection, emails=emails)
 
     def test_run(self):
+        # EC2Runner.run() runs the given function, passing through whatever
+        # arguments and keyword arguments it has been given.
         calls = []
         runner = self.make_ec2runner()
-        runner.run(
-            "boring test method",
-            lambda *a, **kw: calls.append((a, kw)), "foo", "bar", baz="qux")
+        def function(*args, **kwargs):
+            calls.append((args, kwargs))
+        runner.run("boring test method", function, "foo", "bar", baz="qux")
         self.assertEqual([(("foo", "bar"), {'baz': 'qux'})], calls)
 
     def test_email_on_failure_no_emails(self):
@@ -688,7 +810,7 @@
         self.assertEqual([], log)
 
     def test_email_on_failure_some_emails(self):
-        # If no emails are specified, then no email is sent on failure.
+        # If emails *are* specified, then an email is sent on failure.
         log = []
         runner = self.make_ec2runner(
             email_log=log, emails=["foo@xxxxxxxxxxx"])
@@ -701,6 +823,7 @@
         self.assertIn('ZeroDivisionError', str(message))
 
     def test_email_with_launchpad_tester_failure(self):
+        # LaunchpadTester sends email on catastrophic failure.
         email_log = []
         to_emails = ['foo@xxxxxxxxxxx']
         request = self.make_request(emails=to_emails, emails_sent=email_log)
@@ -767,18 +890,37 @@
     def get_body_text(self, email):
         return email.get_payload()[0].get_payload()
 
+    def make_empty_result(self):
+        return TestResult()
+
+    def make_successful_result(self):
+        result = self.make_empty_result()
+        result.startTest(self)
+        result.stopTest(self)
+        return result
+
+    def make_failing_result(self):
+        result = self.make_empty_result()
+        result.startTest(self)
+        try:
+            1/0
+        except ZeroDivisionError:
+            result.addError(self, sys.exc_info())
+        result.stopTest(self)
+        return result
+
     def test_success_no_emails(self):
         log = []
         request = self.make_request(emails=[], emails_sent=log)
         logger = self.make_logger(request=request)
-        logger.got_result(True)
+        logger.got_result(self.make_successful_result())
         self.assertEqual([], log)
 
     def test_failure_no_emails(self):
         log = []
         request = self.make_request(emails=[], emails_sent=log)
         logger = self.make_logger(request=request)
-        logger.got_result(False)
+        logger.got_result(self.make_failing_result())
         self.assertEqual([], log)
 
     def test_submits_to_pqm_on_success(self):
@@ -787,7 +929,7 @@
         request = self.make_request(
             emails=[], pqm_message=message, emails_sent=log)
         logger = self.make_logger(request=request)
-        logger.got_result(True)
+        logger.got_result(self.make_successful_result())
         self.assertEqual([message], log)
 
     def test_records_pqm_submission_in_email(self):
@@ -796,20 +938,20 @@
         request = self.make_request(
             emails=['foo@xxxxxxxxxxx'], pqm_message=message, emails_sent=log)
         logger = self.make_logger(request=request)
-        logger.got_result(True)
+        logger.got_result(self.make_successful_result())
         [pqm_message, user_message] = log
         self.assertEqual(message, pqm_message)
         self.assertIn(
             'SUBMITTED TO PQM:\n%s' % (message['Subject'],),
             self.get_body_text(user_message))
 
-    def test_doesnt_submit_to_pqm_no_failure(self):
+    def test_doesnt_submit_to_pqm_on_failure(self):
         log = []
         message = {'Subject': 'foo'}
         request = self.make_request(
             emails=[], pqm_message=message, emails_sent=log)
         logger = self.make_logger(request=request)
-        logger.got_result(False)
+        logger.got_result(self.make_failing_result())
         self.assertEqual([], log)
 
     def test_records_non_pqm_submission_in_email(self):
@@ -818,7 +960,7 @@
         request = self.make_request(
             emails=['foo@xxxxxxxxxxx'], pqm_message=message, emails_sent=log)
         logger = self.make_logger(request=request)
-        logger.got_result(False)
+        logger.got_result(self.make_failing_result())
         [user_message] = log
         self.assertIn(
             '**NOT** submitted to PQM:\n%s' % (message['Subject'],),
@@ -829,29 +971,25 @@
         request = self.make_request(
             emails=['foo@xxxxxxxxxxx'], emails_sent=log)
         logger = self.make_logger(request=request)
-        logger.got_result(False)
+        logger.got_result(self.make_failing_result())
         [user_message] = log
         self.assertIn(
             '(See the attached file for the complete log)\n',
             self.get_body_text(user_message))
 
-    def test_email_body_is_summary(self):
+    def test_email_body_is_format_result(self):
         # The body of the email sent to the user is the summary file.
-        # XXX: JonathanLange 2010-08-17: We actually want to change this
-        # behaviour so that the summary log stays roughly the same as it is
-        # now and can vary independently from the contents of the sent
-        # email. We probably just want the contents of the email to be a list
-        # of failing tests.
         log = []
         request = self.make_request(
             emails=['foo@xxxxxxxxxxx'], emails_sent=log)
         logger = self.make_logger(request=request)
-        logger.get_summary_stream().write('bar\nbaz\nqux\n')
-        logger.got_result(False)
+        result = self.make_failing_result()
+        logger.got_result(result)
         [user_message] = log
         self.assertEqual(
-            'bar\nbaz\nqux\n\n(See the attached file for the complete log)\n',
-            self.get_body_text(user_message))
+            request.format_result(
+                result, logger._start_time, logger._end_time),
+            self.get_body_text(user_message).decode('quoted-printable'))
 
     def test_gzip_of_full_log_attached(self):
         # The full log is attached to the email.
@@ -861,7 +999,7 @@
         logger = self.make_logger(request=request)
         logger.got_line("output from test process\n")
         logger.got_line("more output\n")
-        logger.got_result(False)
+        logger.got_result(self.make_successful_result())
         [user_message] = log
         [body, attachment] = user_message.get_payload()
         self.assertEqual('application/x-gzip', attachment['Content-Type'])