launchpad-reviewers team mailing list archive
-
launchpad-reviewers team
-
Mailing list archive
-
Message #08125
[Merge] lp:~allenap/maas/try-out-saucelabs into lp:maas
Gavin Panella has proposed merging lp:~allenap/maas/try-out-saucelabs into lp:maas.
Requested reviews:
Launchpad code reviewers (launchpad-reviewers)
Related bugs:
Bug #996260 in MAAS: "JS tests are not run on different browser types"
https://bugs.launchpad.net/maas/+bug/996260
For more details, see:
https://code.launchpad.net/~allenap/maas/try-out-saucelabs/+merge/106217
* Introduces some fixtures for integration with the SauceLabs Connect
and OnDemand services, and also for patching this into SST. SST does
have some support for this, but I went the route of bypassing its
set-up and tear-down routines.
* Gets SST tests to report page by page, instead of all at once. It
does this by overriding TestCase.__call__ to multiply up the test
for each browser, in a similar way to the testscenarios method of
operation.
* Gets the SST tests running remotely, via the OnDemand/Connect
service. The browsers to use can be selected by setting the
MAAS_REMOTE_TEST_BROWSERS environment variable to one or more members of {ie7, ie8, ie9,
chrome}. Not all the SST tests pass.
* When bringing up the SauceConnectFixture, Sauce-Connect.jar will be
downloaded if it's not already present. It's quite a big file so it's best
not to put it in the tree.
* Works around an annoying wart in nose, so that the test that is
actually running is now reported. See `active_test`.
--
https://code.launchpad.net/~allenap/maas/try-out-saucelabs/+merge/106217
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~allenap/maas/try-out-saucelabs into lp:maas.
=== modified file 'buildout.cfg'
--- buildout.cfg 2012-05-21 16:15:19 +0000
+++ buildout.cfg 2012-05-23 10:38:21 +0000
@@ -68,6 +68,7 @@
nose
nose-subunit
python-subunit
+ saucelabsfixture
sst
testresources
testscenarios
=== modified file 'src/maasserver/tests/test_js.py'
--- src/maasserver/tests/test_js.py 2012-05-15 13:28:31 +0000
+++ src/maasserver/tests/test_js.py 2012-05-23 10:38:21 +0000
@@ -10,21 +10,35 @@
)
__metaclass__ = type
-__all__ = [
- 'TestYUIUnitTests',
- ]
+__all__ = []
-import BaseHTTPServer
+from abc import (
+ ABCMeta,
+ abstractmethod,
+ )
+from glob import glob
import json
import logging
import os
-from os.path import dirname
-import SimpleHTTPServer
-import SocketServer
-import string
+from os.path import (
+ abspath,
+ dirname,
+ join,
+ )
+from urlparse import urljoin
from fixtures import Fixture
+from maastesting import yui3
+from maastesting.httpd import HTTPServerFixture
+from maastesting.testcase import TestCase
+from maastesting.utils import extract_word_list
+from nose.tools import nottest
from pyvirtualdisplay import Display
+from saucelabsfixture import (
+ SauceConnectFixture,
+ SSTOnDemandFixture,
+ )
+from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from sst.actions import (
assert_text,
get_element,
@@ -33,13 +47,16 @@
stop,
wait_for,
)
-from testscenarios import TestWithScenarios
-from testtools import TestCase
+from testtools import clone_test_with_new_id
# Base path where the HTML files will be searched.
BASE_PATH = 'src/maasserver/static/js/tests/'
+# Nose is over-zealous.
+nottest(clone_test_with_new_id)
+
+
class LoggerSilencerFixture(Fixture):
"""Fixture to change the log level of loggers.
@@ -79,32 +96,17 @@
self.addCleanup(self.display.stop)
-class ThreadingHTTPServer(SocketServer.ThreadingMixIn,
- BaseHTTPServer.HTTPServer):
- """A simple HTTP Server that whill run in it's own thread."""
-
-
-class SilentHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
- # SimpleHTTPRequestHandler logs to stdout: silence it.
- log_request = lambda *args, **kwargs: None
- log_error = lambda *args, **kwargs: None
-
-
class SSTFixture(Fixture):
"""Setup a javascript-enabled testing browser instance with SST."""
logger_names = ['selenium.webdriver.remote.remote_connection']
- def __init__(self, driver):
- self.driver = driver
+ def __init__(self, browser_name):
+ self.browser_name = browser_name
def setUp(self):
super(SSTFixture, self).setUp()
- start(
- self.driver, '', 'ANY', session_name=None,
- javascript_disabled=False,
- assume_trusted_cert_issuer=False,
- webdriver_remote=None)
+ start(self.browser_name)
self.useFixture(LoggerSilencerFixture(self.logger_names))
self.addCleanup(stop)
@@ -112,57 +114,143 @@
project_home = dirname(dirname(dirname(dirname(__file__))))
-def get_drivers_from_env():
- """Parse the environment variable MAAS_TEST_BROWSERS to get a list of
+def get_browser_names_from_env():
+ """Parse the environment variable ``MAAS_TEST_BROWSERS`` to get a list of
the browsers to use for the JavaScript tests.
+
Returns ['Firefox'] if the environment variable is not present.
"""
- return map(
- string.strip,
- os.environ.get('MAAS_TEST_BROWSERS', 'Firefox').split(','))
-
-
-class TestYUIUnitTests(TestWithScenarios, TestCase):
-
- scenarios = [
- (driver, dict(driver=driver)) for driver in get_drivers_from_env()]
-
- def setUp(self):
- super(TestYUIUnitTests, self).setUp()
- self.useFixture(DisplayFixture())
- self.useFixture(SSTFixture(self.driver))
-
- def _get_failed_tests_message(self, results):
- """Return a readable error message with the list of the failed tests.
-
- Given a YUI3 results_ json object, return a readable error message.
-
- .. _results: http://yuilibrary.com/yui/docs/test/
+ names = os.environ.get('MAAS_TEST_BROWSERS', 'Firefox')
+ return extract_word_list(names)
+
+
+# See <https://saucelabs.com/docs/ondemand/browsers/env/python/se2/linux> for
+# more information on browser/platform choices.
+remote_browsers = {
+ "ie7": dict(
+ DesiredCapabilities.INTERNETEXPLORER,
+ version="7", platform="XP"),
+ "ie8": dict(
+ DesiredCapabilities.INTERNETEXPLORER,
+ version="8", platform="XP"),
+ "ie9": dict(
+ DesiredCapabilities.INTERNETEXPLORER,
+ version="9", platform="VISTA"),
+ "chrome": dict(
+ DesiredCapabilities.CHROME,
+ platform="VISTA"),
+ }
+
+
+def get_remote_browser_names_from_env():
+ """Parse the environment variable ``MAAS_REMOTE_TEST_BROWSERS`` to get a
+ list of the browsers to use for the JavaScript tests.
+
+ Returns [] if the environment variable is not present.
+ """
+ names = os.environ.get('MAAS_REMOTE_TEST_BROWSERS', '')
+ names = [name.lower() for name in extract_word_list(names)]
+ unrecognised = set(names).difference(remote_browsers)
+ if len(unrecognised) > 0:
+ raise ValueError("Unrecognised browsers: %r" % unrecognised)
+ return names
+
+
+class YUIUnitBase:
+ """Base class for running YUI3 tests in a variety of browsers.
+
+ Calls to instance of this class are intercepted. If the call is to a clone
+ the superclass is called, and thus the test executes as normal. Otherwise
+ the `multiply` method is called. This method can then arrange for the
+ testcase to be run in multiple environments, cloning the test for each.
+
+ In this way it can efficiently set-up and tear-down resources for the
+ tests, and also report on a per-test basis. If test resources were fully
+ working for MAAS tests this might not be necessary, but at the time of
+ implementation this was a solution with the lowest friction (at least,
+ lower than ripping nose out, or teaching it about test resources).
+ """
+
+ __metaclass__ = ABCMeta
+
+ test_paths = glob(join(BASE_PATH, "*.html"))
+
+ # Indicates if this test has been cloned.
+ cloned = False
+
+ def clone(self, suffix):
+ # Clone this test with a new suffix.
+ test = clone_test_with_new_id(
+ self, "%s#%s" % (self.id(), suffix))
+ test.cloned = True
+ return test
+
+ @abstractmethod
+ def multiply(self, result):
+ """Run the test for each of a specified range of browsers.
+
+ This method should sort out shared fixtures.
"""
- result = []
- suites = [item for item in results.values() if isinstance(item, dict)]
- for suite in suites:
- if suite['failed'] != 0:
- tests = [item for item in suite.values()
- if isinstance(item, dict)]
- for test in tests:
- if test['result'] != 'pass':
- result.append('\n%s.%s: %s\n' % (
- suite['name'], test['name'], test['message']))
- return ''.join(result)
+
+ def __call__(self, result=None):
+ if self.cloned:
+ # This test has been cloned; just call-up to run the test.
+ super(YUIUnitBase, self).__call__(result)
+ else:
+ self.multiply(result)
def test_YUI3_unit_tests(self):
- # Find all the HTML files in BASE_PATH.
- for fname in os.listdir(BASE_PATH):
- if fname.endswith('.html'):
- # Load the page and then wait for #suite to contain
- # 'done'. Read the results in '#test_results'.
- file_path = os.path.join(project_home, BASE_PATH, fname)
- go_to('file://%s' % file_path)
- wait_for(assert_text, 'suite', 'done')
- results = json.loads(get_element(id='test_results').text)
- if results['failed'] != 0:
- raise AssertionError(
- '%d test(s) failed.\n%s' % (
- results['failed'],
- self._get_failed_tests_message(results)))
+ # Load the page and then wait for #suite to contain
+ # 'done'. Read the results in '#test_results'.
+ go_to(self.test_url)
+ wait_for(assert_text, 'suite', 'done')
+ results = json.loads(get_element(id='test_results').text)
+ if results['failed'] != 0:
+ message = '%d test(s) failed.\n\n%s' % (
+ results['failed'], yui3.get_failed_tests_message(results))
+ self.fail(message)
+
+
+class YUIUnitTestsLocal(YUIUnitBase, TestCase):
+
+ scenarios = tuple(
+ (path, {"test_url": "file://%s" % abspath(path)})
+ for path in YUIUnitBase.test_paths)
+
+ def multiply(self, result):
+ # Run this test locally for each browser requested. Use the same
+ # display fixture for all browsers. This is done here so that all
+ # scenarios are played out for each browser in turn; starting and
+ # stopping browsers is costly.
+ with DisplayFixture():
+ for browser_name in get_browser_names_from_env():
+ browser_test = self.clone("local:%s" % browser_name)
+ with SSTFixture(browser_name):
+ browser_test.__call__(result)
+
+
+class YUIUnitTestsRemote(YUIUnitBase, TestCase):
+
+ def multiply(self, result):
+ # Now run this test remotely for each requested Sauce OnDemand
+ # browser requested.
+ browser_names = get_remote_browser_names_from_env()
+ if len(browser_names) == 0:
+ return
+
+ # A web server is needed so the OnDemand service can obtain local
+ # tests. Be careful when choosing web server ports; only a scattering
+ # are proxied. See <https://saucelabs.com/docs/ondemand/connect>.
+ with HTTPServerFixture(port=5555) as httpd:
+ scenarios = tuple(
+ (path, {"test_url": urljoin(httpd.url, path)})
+ for path in self.test_paths)
+ with SauceConnectFixture() as sauce_connect:
+ for browser_name in browser_names:
+ capabilities = remote_browsers[browser_name]
+ sst_ondemand = SSTOnDemandFixture(
+ capabilities, sauce_connect.control_url)
+ with sst_ondemand:
+ browser_test = self.clone("remote:%s" % browser_name)
+ browser_test.scenarios = scenarios
+ browser_test(result)
=== modified file 'src/maastesting/__init__.py'
--- src/maastesting/__init__.py 2012-05-15 13:28:31 +0000
+++ src/maastesting/__init__.py 2012-05-23 10:38:21 +0000
@@ -1,29 +0,0 @@
-# Copyright 2012 Canonical Ltd. This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""maastesting initialization."""
-
-from __future__ import (
- absolute_import,
- print_function,
- unicode_literals,
- )
-
-__metaclass__ = type
-__all__ = [
- ]
-
-
-# Nose is currently incompatible with testscenarios because of the assertions
-# it makes about test names (see bug 872887 for details).
-# Here we monkey patch node.ResultProxy.assertMyTest to turn it into
-# a no-op. Note that assertMyTest would already be a no-op if we were
-# running python with -O.
-def assertMyTest(self, test):
- pass
-
-
-from nose.proxy import ResultProxy
-
-
-ResultProxy.assertMyTest = assertMyTest
=== added file 'src/maastesting/httpd.py'
--- src/maastesting/httpd.py 1970-01-01 00:00:00 +0000
+++ src/maastesting/httpd.py 2012-05-23 10:38:21 +0000
@@ -0,0 +1,53 @@
+# Copyright 2012 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""HTTP server fixture."""
+
+from __future__ import (
+ absolute_import,
+ print_function,
+ unicode_literals,
+ )
+
+__metaclass__ = type
+__all__ = [
+ "HTTPServerFixture",
+ ]
+
+from BaseHTTPServer import HTTPServer
+from SimpleHTTPServer import SimpleHTTPRequestHandler
+from SocketServer import ThreadingMixIn
+import threading
+
+from fixtures import Fixture
+
+
+class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
+ """A simple HTTP server that will run in its own thread."""
+
+
+class SilentHTTPRequestHandler(SimpleHTTPRequestHandler):
+ # SimpleHTTPRequestHandler logs to stdout: silence it.
+ log_request = lambda *args, **kwargs: None
+ log_error = lambda *args, **kwargs: None
+
+
+class HTTPServerFixture(Fixture):
+ """Bring up a very simple, threaded, web server.
+
+ Files are served from the current working directory and below.
+ """
+
+ def __init__(self, host="localhost", port=0):
+ super(HTTPServerFixture, self).__init__()
+ self.server = ThreadingHTTPServer(
+ (host, port), SilentHTTPRequestHandler)
+
+ @property
+ def url(self):
+ return "http://%s:%d/" % self.server.server_address
+
+ def setUp(self):
+ super(HTTPServerFixture, self).setUp()
+ threading.Thread(target=self.server.serve_forever).start()
+ self.addCleanup(self.server.shutdown)
=== modified file 'src/maastesting/testcase.py'
--- src/maastesting/testcase.py 2012-05-16 14:34:11 +0000
+++ src/maastesting/testcase.py 2012-05-23 10:38:21 +0000
@@ -14,15 +14,40 @@
'TestCase',
]
+from contextlib import contextmanager
import unittest
from fixtures import TempDir
from maastesting.factory import factory
from maastesting.scenarios import WithScenarios
+from nose.proxy import ResultProxy
+from nose.tools import nottest
import testresources
import testtools
+@nottest
+@contextmanager
+def active_test(result, test):
+ """Force nose to report for the test that's running.
+
+ Nose presents a proxy result and passes on results using only the
+ top-level test, rather than the actual running test. This attempts to undo
+ this dubious choice.
+
+ If the result is not a nose proxy then this is a no-op.
+ """
+ if isinstance(result, ResultProxy):
+ orig = result.test.test
+ result.test.test = test
+ try:
+ yield
+ finally:
+ result.test.test = orig
+ else:
+ yield
+
+
class TestCase(WithScenarios, testtools.TestCase):
"""Base `TestCase` for MAAS.
@@ -86,3 +111,11 @@
# Django's implementation for this seems to be broken and was
# probably only added to support compatibility with python 2.6.
assertItemsEqual = unittest.TestCase.assertItemsEqual
+
+ def run(self, result=None):
+ with active_test(result, self):
+ super(TestCase, self).run(result)
+
+ def __call__(self, result=None):
+ with active_test(result, self):
+ super(TestCase, self).__call__(result)
=== added file 'src/maastesting/tests/test_httpd.py'
--- src/maastesting/tests/test_httpd.py 1970-01-01 00:00:00 +0000
+++ src/maastesting/tests/test_httpd.py 2012-05-23 10:38:21 +0000
@@ -0,0 +1,52 @@
+# Copyright 2012 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for `maastesting.httpd`."""
+
+from __future__ import (
+ absolute_import,
+ print_function,
+ unicode_literals,
+ )
+
+__metaclass__ = type
+__all__ = []
+
+from contextlib import closing
+from socket import (
+ gethostbyname,
+ gethostname,
+ )
+from urllib2 import urlopen
+from urlparse import urljoin
+
+from maastesting.httpd import (
+ HTTPServerFixture,
+ ThreadingHTTPServer,
+ )
+from maastesting.testcase import TestCase
+from testtools.matchers import FileExists
+
+
+class TestHTTPServerFixture(TestCase):
+
+ def test_init(self):
+ host = gethostname()
+ fixture = HTTPServerFixture(host=host)
+ self.assertIsInstance(fixture.server, ThreadingHTTPServer)
+ expected_url = "http://%s:%d/" % (
+ gethostbyname(host), fixture.server.server_port)
+ self.assertEqual(expected_url, fixture.url)
+
+ def test_use(self):
+ filename = "setup.py"
+ self.assertThat(filename, FileExists())
+ with HTTPServerFixture() as httpd:
+ url = urljoin(httpd.url, filename)
+ with closing(urlopen(url)) as http_in:
+ http_data_in = http_in.read()
+ with open(filename, "rb") as file_in:
+ file_data_in = file_in.read()
+ self.assertEqual(
+ file_data_in, http_data_in,
+ "The content of %s differs from %s." % (url, filename))
=== added file 'src/maastesting/tests/test_utils.py'
--- src/maastesting/tests/test_utils.py 1970-01-01 00:00:00 +0000
+++ src/maastesting/tests/test_utils.py 2012-05-23 10:38:21 +0000
@@ -0,0 +1,34 @@
+# Copyright 2012 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for testing helpers."""
+
+from __future__ import (
+ absolute_import,
+ print_function,
+ unicode_literals,
+ )
+
+__metaclass__ = type
+__all__ = []
+
+from maastesting.testcase import TestCase
+from maastesting.utils import extract_word_list
+
+
+class TestFunctions(TestCase):
+
+ def test_extract_word_list(self):
+ expected = {
+ "one 2": ["one", "2"],
+ ", one ; 2": ["one", "2"],
+ "one,2": ["one", "2"],
+ "one;2": ["one", "2"],
+ "\none\t 2;": ["one", "2"],
+ "\none-two\t 3;": ["one-two", "3"],
+ }
+ observed = {
+ string: extract_word_list(string)
+ for string in expected
+ }
+ self.assertEqual(expected, observed)
=== added file 'src/maastesting/tests/test_yui3.py'
--- src/maastesting/tests/test_yui3.py 1970-01-01 00:00:00 +0000
+++ src/maastesting/tests/test_yui3.py 2012-05-23 10:38:21 +0000
@@ -0,0 +1,154 @@
+# Copyright 2012 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for `maasserver.testing.yui3`."""
+
+from __future__ import (
+ absolute_import,
+ print_function,
+ unicode_literals,
+ )
+
+__metaclass__ = type
+__all__ = []
+
+from maastesting.testcase import TestCase
+from maastesting.yui3 import (
+ extract_tests,
+ gen_failed_test_messages,
+ get_failed_tests_message,
+ )
+from nose.tools import nottest
+
+# Nose is over-zealous.
+nottest(extract_tests)
+nottest(gen_failed_test_messages)
+nottest(get_failed_tests_message)
+
+
+# From http://yuilibrary.com/yui/docs/test/#testsuite-level-events
+example_results = {
+ 'failed': 3,
+ 'ignored': 0,
+ 'name': 'Test Suite 0',
+ 'passed': 3,
+ 'testCase0': {
+ 'failed': 1,
+ 'ignored': 0,
+ 'name': 'testCase0',
+ 'passed': 1,
+ 'test0': {
+ 'message': 'Test passed.',
+ 'name': 'test0',
+ 'result': 'pass',
+ 'type': 'test',
+ },
+ 'test1': {
+ 'message': 'Assertion failed.',
+ 'name': 'test1',
+ 'result': 'fail',
+ 'type': 'test',
+ },
+ 'total': 2,
+ 'type': 'testcase',
+ },
+ 'testCase1': {
+ 'failed': 1,
+ 'ignored': 0,
+ 'name': 'testCase1',
+ 'passed': 1,
+ 'test0': {
+ 'message': 'Test passed.',
+ 'name': 'test0',
+ 'result': 'pass',
+ 'type': 'test',
+ },
+ 'test1': {
+ 'message': 'Assertion failed.',
+ 'name': 'test1',
+ 'result': 'fail',
+ 'type': 'test',
+ },
+ 'total': 2,
+ 'type': 'testcase',
+ },
+ 'testSuite0': {
+ 'failed': 1,
+ 'ignored': 0,
+ 'name': 'testSuite0',
+ 'passed': 1,
+ 'testCase2': {
+ 'failed': 1,
+ 'ignored': 0,
+ 'name': 'testCase2',
+ 'passed': 1,
+ 'test0': {
+ 'message': 'Test passed.',
+ 'name': 'test0',
+ 'result': 'pass',
+ 'type': 'test',
+ },
+ 'test1': {
+ 'message': 'Assertion failed.',
+ 'name': 'test1',
+ 'result': 'fail',
+ 'type': 'test',
+ },
+ 'total': 2,
+ 'type': 'testcase'},
+ 'total': 2,
+ 'type': 'testsuite'},
+ 'total': 6,
+ 'type': 'testsuite',
+ }
+
+
+class TestFunctions(TestCase):
+
+ def test_extract_tests_names(self):
+ expected_names = {
+ "testCase0.test0",
+ "testCase0.test1",
+ "testCase1.test0",
+ "testCase1.test1",
+ "testSuite0.testCase2.test0",
+ "testSuite0.testCase2.test1",
+ }
+ observed_tests = extract_tests(example_results)
+ observed_test_names = set(observed_tests)
+ self.assertSetEqual(expected_names, observed_test_names)
+
+ def test_extract_tests(self):
+ expected_results = {
+ "testCase0.test0": "pass",
+ "testCase0.test1": "fail",
+ "testCase1.test0": "pass",
+ "testCase1.test1": "fail",
+ "testSuite0.testCase2.test0": "pass",
+ "testSuite0.testCase2.test1": "fail",
+ }
+ observed_results = {
+ name: test["result"]
+ for name, test in extract_tests(example_results).items()
+ }
+ self.assertDictEqual(expected_results, observed_results)
+
+ def test_gen_failed_test_messages(self):
+ expected_messages = {
+ "testCase0.test1: Assertion failed.",
+ "testCase1.test1: Assertion failed.",
+ "testSuite0.testCase2.test1: Assertion failed.",
+ }
+ observed_messages = gen_failed_test_messages(example_results)
+ self.assertSetEqual(expected_messages, set(observed_messages))
+
+ def test_get_failed_tests_message(self):
+ expected_message = (
+ "testCase0.test1: Assertion failed."
+ "\n\n"
+ "testCase1.test1: Assertion failed."
+ "\n\n"
+ "testSuite0.testCase2.test1: Assertion failed."
+ )
+ observed_message = get_failed_tests_message(example_results)
+ self.assertEqual(expected_message, observed_message)
=== added file 'src/maastesting/utils.py'
--- src/maastesting/utils.py 1970-01-01 00:00:00 +0000
+++ src/maastesting/utils.py 2012-05-23 10:38:21 +0000
@@ -0,0 +1,76 @@
+# Copyright 2012 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Testing utilities."""
+
+from __future__ import (
+ absolute_import,
+ print_function,
+ unicode_literals,
+ )
+
+__metaclass__ = type
+__all__ = [
+ "content_from_file",
+ "extract_word_list",
+ "preexec_fn",
+ "retries",
+ ]
+
+import re
+import signal
+from time import (
+ sleep,
+ time,
+ )
+
+from testtools.content import Content
+from testtools.content_type import UTF8_TEXT
+
+
+def content_from_file(path):
+ """Alternative to testtools' version.
+
+ This keeps an open file-handle, so it can obtain the log even when the
+ file has been unlinked.
+ """
+ fd = open(path, "rb")
+
+ def iterate():
+ fd.seek(0)
+ return iter(fd)
+
+ return Content(UTF8_TEXT, iterate)
+
+
+def extract_word_list(string):
+ """Return a list of words from a string.
+
+ Words are any string of 1 or more characters, not including commas,
+ semi-colons, or whitespace.
+ """
+ return re.findall("[^,;\s]+", string)
+
+
+def preexec_fn():
+ # Revert Python's handling of SIGPIPE. See
+ # http://bugs.python.org/issue1652 for more info.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def retries(timeout=30, delay=1):
+ """Helper for retrying something, sleeping between attempts.
+
+ Yields ``(elapsed, remaining)`` tuples, giving times in seconds.
+
+ @param timeout: From now, how long to keep iterating, in seconds.
+ @param delay: The sleep between each iteration, in seconds.
+ """
+ start = time()
+ end = start + timeout
+ for now in iter(time, None):
+ if now < end:
+ yield now - start, end - now
+ sleep(min(delay, end - now))
+ else:
+ break
=== added file 'src/maastesting/yui3.py'
--- src/maastesting/yui3.py 1970-01-01 00:00:00 +0000
+++ src/maastesting/yui3.py 2012-05-23 10:38:21 +0000
@@ -0,0 +1,61 @@
+# Copyright 2012 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Helpers for dealing with YUI3."""
+
+from __future__ import (
+ absolute_import,
+ print_function,
+ unicode_literals,
+ )
+
+__metaclass__ = type
+__all__ = [
+ "extract_tests",
+ "gen_failed_test_messages",
+ "get_failed_tests_message",
+ ]
+
+
+def extract_tests(results):
+ """Extract tests from a YUI3 test result object.
+
+ See `TestSuite-Level Events`_ for details of the test result object form.
+
+ .. _TestSuite-Level Events:
+ http://yuilibrary.com/yui/docs/test/#testsuite-level-events
+
+ """
+ accumulator = {}
+ _extract_tests(results, accumulator)
+ return accumulator
+
+
+def _extract_tests(results, accumulator, *stack):
+ """Helper for `extract_tests`."""
+ if isinstance(results, dict):
+ if results["type"] == "test":
+ name = ".".join(reversed(stack))
+ accumulator[name] = results
+ else:
+ for name, value in results.items():
+ _extract_tests(value, accumulator, name, *stack)
+
+
+def gen_failed_test_messages(results):
+ """Yield test failure messages from the given results.
+
+ @param results: See `extract_tests`.
+ """
+ for name, test in extract_tests(results).items():
+ if test["result"] != "pass":
+ yield "%s: %s" % (name, test["message"])
+
+
+def get_failed_tests_message(results):
+ """Return a complete error message for the given results.
+
+ @param results: See `extract_tests`.
+ """
+ messages = gen_failed_test_messages(results)
+ return "\n\n".join(sorted(messages))
=== modified file 'versions.cfg'
--- versions.cfg 2012-05-21 14:43:55 +0000
+++ versions.cfg 2012-05-23 10:38:21 +0000
@@ -35,6 +35,7 @@
pymongo = 2.1.1
python-dateutil = 1.5
PyYAML = 3.10
+saucelabsfixture = 0.1
setuptools = 0.6.24
South = 0.7.3
Twisted = 11.1.0
@@ -169,7 +170,8 @@
# Required by:
# sst==0.2.1
-selenium = 2.21.2
+# saucelabsfixture=0.1
+selenium = 2.21.3
# Required by:
# sst==0.2.1
Follow ups