← Back to team overview

openerp-community-reviewer team mailing list archive

[Merge] lp:~anybox/ocb-server/7.0-test-report into lp:ocb-server

 

Georges Racinet has proposed merging lp:~anybox/ocb-server/7.0-test-report into lp:ocb-server.

Requested reviews:
  OpenERP Community Backports Team (ocb)

For more details, see:
https://code.launchpad.net/~anybox/ocb-server/7.0-test-report/+merge/207978

The core of OpenERP already has a system to report test failures (data loading, YML tests, true unit tests).

This minimally intrusive change enhances it to record more detail, allowing the launcher process to end with a useful summary.

There is a matching branch of openerp-command/7.0 that simply prints the data at the end of testing all modules. I believe this is already enough to help maintainers quickly get an idea of the errors and dispatch them accordingly.

Here's a sample output for the current broken tests in OCB:

FAIL : 2 failure(s) or error(s) have been recorded
Module account_payment, in test file u'test/payment_order_process.yml': AssertionError in Python code : Due date is not correct.
Module purchase_requisition: Exception during load of legacy data-based tests (yml...)


See http://buildbot.anybox.fr/builders/ocb-7.0-postgresql-9.3/builds/273/steps/testing/logs/stdio for the full log.
-- 
https://code.launchpad.net/~anybox/ocb-server/7.0-test-report/+merge/207978
Your team OpenERP Community Backports Team is requested to review the proposed merge of lp:~anybox/ocb-server/7.0-test-report into lp:ocb-server.
=== modified file 'openerp/modules/loading.py'
--- openerp/modules/loading.py	2013-11-25 10:38:42 +0000
+++ openerp/modules/loading.py	2014-02-24 17:07:10 +0000
@@ -197,13 +197,21 @@
                 # 'data' section, but should probably not alter the data,
                 # as there is no rollback.
                 if tools.config.options['test_enable']:
-                    report.record_result(load_test(module_name, idref, mode))
-
+                    report.record_result(load_test(module_name, idref, mode),
+                                         details=(dict(module=module_name,
+                                                       msg="Exception during load of legacy "
+                                                       "data-based tests (yml...)")))
                     # Run the `fast_suite` and `checks` tests given by the module.
                     if module_name == 'base':
                         # Also run the core tests after the database is created.
-                        report.record_result(openerp.modules.module.run_unit_tests('openerp'))
-                    report.record_result(openerp.modules.module.run_unit_tests(module_name))
+                        report.record_result(openerp.modules.module.run_unit_tests('openerp'),
+                                             details=dict(module='openerp',
+                                                          msg="Failure or error in server core "
+                                                          "unit tests"))
+                    report.record_result(openerp.modules.module.run_unit_tests(module_name),
+                                         details=dict(module=module_name,
+                                                      msg="Failure or error in unit tests, "
+                                                      "check logs for more details"))
 
             processed_modules.append(package.name)
 

=== modified file 'openerp/tools/assertion_report.py'
--- openerp/tools/assertion_report.py	2012-03-02 11:02:27 +0000
+++ openerp/tools/assertion_report.py	2014-02-24 17:07:10 +0000
@@ -8,20 +8,28 @@
     def __init__(self):
         self.successes = 0
         self.failures = 0
+        self.failures_details = []
 
     def record_success(self):
         self.successes += 1
 
-    def record_failure(self):
+    def record_failure(self, details=None):
         self.failures += 1
-
-    def record_result(self, result):
+        if details is not None:
+            self.failures_details.append(details)
+
+    def record_result(self, result, details=None):
+        """Record either success or failure, with the provided details in the latter case.
+
+        :param result: a boolean
+        :param details: a dict with keys ``'module'``, ``'testfile'``, ``'msg'``, ``'msg_args'``
+        """
         if result is None:
             pass
         elif result is True:
             self.record_success()
         elif result is False:
-            self.record_failure()
+            self.record_failure(details=details)
 
     def __str__(self):
         res = 'Assertions report: %s successes, %s failures' % (self.successes, self.failures)

=== modified file 'openerp/tools/convert.py'
--- openerp/tools/convert.py	2013-12-03 09:24:33 +0000
+++ openerp/tools/convert.py	2014-02-24 17:07:10 +0000
@@ -692,13 +692,15 @@
             if rec_src_count:
                 count = int(rec_src_count)
                 if len(ids) != count:
-                    self.assertion_report.record_failure()
                     msg = 'assertion "%s" failed!\n'    \
                           ' Incorrect search count:\n'  \
                           ' expected count: %d\n'       \
-                          ' obtained count: %d\n'       \
-                          % (rec_string, count, len(ids))
-                    _logger.error(msg)
+                          ' obtained count: %d\n'
+                    msg_args = (rec_string, count, len(ids))
+                    _logger.error(msg, msg_args)
+                    self.assertion_report.record_failure(details=dict(module=self.module,
+                                                                      msg=msg,
+                                                                      msg_args=msg_args))
                     return
 
         assert ids is not None,\
@@ -720,13 +722,15 @@
                 expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
                 expression_value = unsafe_eval(f_expr, globals_dict)
                 if expression_value != expected_value: # assertion failed
-                    self.assertion_report.record_failure()
                     msg = 'assertion "%s" failed!\n'    \
                           ' xmltag: %s\n'               \
                           ' expected value: %r\n'       \
-                          ' obtained value: %r\n'       \
-                          % (rec_string, etree.tostring(test), expected_value, expression_value)
-                    _logger.error(msg)
+                          ' obtained value: %r\n'
+                    msg_args = (rec_string, etree.tostring(test), expected_value, expression_value)
+                    self.assertion_report.record_failure(details=dict(module=self.module,
+                                                                      msg=msg,
+                                                                      msg_args=msg_args))
+                    _logger.error(msg, msg_args)
                     return
         else: # all tests were successful for this assertion tag (no break)
             self.assertion_report.record_success()

=== modified file 'openerp/tools/yaml_import.py'
--- openerp/tools/yaml_import.py	2012-12-01 11:35:24 +0000
+++ openerp/tools/yaml_import.py	2014-02-24 17:07:10 +0000
@@ -1,10 +1,12 @@
 # -*- coding: utf-8 -*-
+import os
 import threading
 import types
 import time # used to eval time.strftime expressions
 from datetime import datetime, timedelta
 import logging
 
+from copy import deepcopy
 import openerp.pooler as pooler
 import openerp.sql_db as sql_db
 import misc
@@ -193,7 +195,15 @@
         return node
 
     def _log_assert_failure(self, msg, *args):
-        self.assertion_report.record_failure()
+        from openerp import tools  # does not work in module prelude (loop?)
+        pyinit = tools.file_open('/'.join((self.module, '__init__.py')))
+        basepath = os.path.dirname(pyinit.name)
+        pyinit.close()
+        self.assertion_report.record_failure(
+            details=dict(module=self.module,
+                         testfile=os.path.relpath(self.filename, basepath),
+                         msg=msg,
+                         msg_args=deepcopy(args)))
         _logger.error(msg, *args)
 
     def _get_assertion_id(self, assertion):


Follow ups