← Back to team overview

opencompute-developers team mailing list archive

[Merge] lp:~jeffmarcom/opencompute/checkbox-ocp_update-plainbox into lp:opencompute/checkbox

 

Jeff Marcom has proposed merging lp:~jeffmarcom/opencompute/checkbox-ocp_update-plainbox into lp:opencompute/checkbox.

Requested reviews:
  Open Compute Developers (opencompute-developers)

For more details, see:
https://code.launchpad.net/~jeffmarcom/opencompute/checkbox-ocp_update-plainbox/+merge/185549

This updates the included version of plainbox within checkbox for the opencompute project.
-- 
The attached diff has been truncated due to its size.
https://code.launchpad.net/~jeffmarcom/opencompute/checkbox-ocp_update-plainbox/+merge/185549
Your team Open Compute Developers is requested to review the proposed merge of lp:~jeffmarcom/opencompute/checkbox-ocp_update-plainbox into lp:opencompute/checkbox.
=== modified file 'debian/changelog'
--- debian/changelog	2013-08-21 16:23:56 +0000
+++ debian/changelog	2013-09-13 17:12:45 +0000
@@ -1,3 +1,12 @@
+
+checkbox (1.16.7~OCP) UNRELEASED; urgency=low
+
+  [ Jeff Marcom ]
+  * Updated plainbox based on version 0.4.dev in lp:checkbox (16.12)
+
+ -- Jeff Marcom <jeff.marcom@xxxxxxxxxxxxx>  Fri, 13 Sep 2013 10:13:04 -0400
+
+
 checkbox (1.16.6~OCP) UNRELEASED; urgency=low
 
   [ Jeff Marcom ]

=== modified file 'plainbox/MANIFEST.in'
--- plainbox/MANIFEST.in	2013-05-09 18:39:35 +0000
+++ plainbox/MANIFEST.in	2013-09-13 17:12:45 +0000
@@ -1,9 +1,10 @@
 include README.md
 include COPYING
 include mk-interesting-graphs.sh
-recursive-include plainbox/test-data/ *.json *.xml *.txt
+recursive-include plainbox/test-data *.json *.xml *.txt
 recursive-include docs *.rst
 include docs/conf.py
-include plainbox/data/report/hardware-1_0.rng
+recursive-include plainbox/data/report *.rng *.css *.xsl *.js
+recursive-include plainbox/data/report/images *.png
 include contrib/policykit_yes/org.freedesktop.policykit.pkexec.policy
 include contrib/policykit_auth_admin_keep/org.freedesktop.policykit.pkexec.policy

=== added file 'plainbox/contrib/com.canonical.certification.PlainBox1.service'
--- plainbox/contrib/com.canonical.certification.PlainBox1.service	1970-01-01 00:00:00 +0000
+++ plainbox/contrib/com.canonical.certification.PlainBox1.service	2013-09-13 17:12:45 +0000
@@ -0,0 +1,3 @@
+[D-BUS Service]
+Name=com.canonical.certification.PlainBox1
+Exec=/usr/bin/plainbox service

=== added file 'plainbox/contrib/dbus-mini-client.py'
--- plainbox/contrib/dbus-mini-client.py	1970-01-01 00:00:00 +0000
+++ plainbox/contrib/dbus-mini-client.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,430 @@
+#!/usr/bin/env python3
+########
+#This simple script provides a small reference and example of how to
+#invoke plainbox methods through d-bus to accomplish useful tasks.
+#
+#Use of the d-feet tool is suggested for interactive exploration of
+#the plainbox objects, interfaces and d-bus API. However, d-feet can be
+#cumbersome to use for more advanced testing and experimentation.
+#
+#This script can be adapted to fit other testing needs as well.
+#
+#To run it, first launch plainbox in service mode using the stub provider:
+#    $ plainbox -c stub service
+#
+#then run the script itself. It does the following things:
+#
+# 1- Obtain a whitelist and a job provider
+# 2- Use the whitelist to "qualify" jobs offered by the provider,
+#    In essence filtering them to obtain a desired list of jobs to run.
+# 3- Run each job in the run_list, this implicitly updates the session's
+#    results and state map.
+# 4- Print the job names and outcomes and some other data
+# 5- Export the session's data (job results) to xml in /tmp.
+#####
+
+import dbus
+from gi.repository import GObject
+from dbus.mainloop.glib import DBusGMainLoop
+from plainbox.abc import IJobResult
+
+bus = dbus.SessionBus(mainloop=DBusGMainLoop())
+
+# TODO: Create a class to remove all global var.
+current_job_path = None
+service = None
+session_object_path = None
+session_object = None
+run_list = None
+desired_job_list = None
+whitelist = None
+exports_count = 0
+
+def main():
+    global service
+    global session_object_path
+    global session_object
+    global run_list
+    global desired_job_list
+    global whitelist
+
+    whitelist = bus.get_object(
+        'com.canonical.certification.PlainBox1',
+        '/plainbox/whitelist/stub'
+    )
+
+    provider = bus.get_object(
+        'com.canonical.certification.PlainBox1',
+        '/plainbox/provider/stubbox'
+    )
+
+    #whitelist = bus.get_object(
+    #    'com.canonical.certification.PlainBox1',
+    #    '/plainbox/whitelist/default'
+    #)
+
+    #provider = bus.get_object(
+    #    'com.canonical.certification.PlainBox1',
+    #    '/plainbox/provider/checkbox'
+    #)
+
+    #A provider manages objects other than jobs.
+    provider_objects = provider.GetManagedObjects(
+        dbus_interface='org.freedesktop.DBus.ObjectManager')
+
+    #Create a session and "seed" it with my job list:
+    job_list = [k for k, v in provider_objects.items() if not 'whitelist' in k]
+    service = bus.get_object(
+        'com.canonical.certification.PlainBox1',
+        '/plainbox/service1'
+    )
+    session_object_path = service.CreateSession(
+        job_list,
+        dbus_interface='com.canonical.certification.PlainBox.Service1'
+    )
+    session_object = bus.get_object(
+        'com.canonical.certification.PlainBox1',
+        session_object_path
+    )
+
+    if session_object.PreviousSessionFile():
+        if ask_for_resume():
+            session_object.Resume()
+        else:
+            session_object.Clean()
+
+    #to get only the *jobs* that are designated by the whitelist.
+    desired_job_list = [
+        object for object in provider_objects if whitelist.Designates(
+            object,
+            dbus_interface='com.canonical.certification.PlainBox.WhiteList1')]
+
+    desired_local_job_list = sorted([
+        object for object in desired_job_list if
+        bus.get_object('com.canonical.certification.PlainBox1', object).Get(
+            'com.canonical.certification.CheckBox.JobDefinition1',
+            'plugin') == 'local'
+    ])
+
+    #Now I update the desired job list.
+    session_object.UpdateDesiredJobList(
+        desired_local_job_list,
+        dbus_interface='com.canonical.certification.PlainBox.Session1'
+    )
+
+    #Now, the run_list contains the list of jobs I actually need to run \o/
+    run_list = session_object.Get(
+        'com.canonical.certification.PlainBox.Session1',
+        'run_list'
+    )
+
+    # Add some signal receivers
+    bus.add_signal_receiver(
+        catchall_local_job_result_available_signals_handler,
+        dbus_interface="com.canonical.certification.PlainBox.Service1",
+        signal_name="JobResultAvailable")
+
+    # Start running jobs
+    print("[ Running All Local Jobs ]".center(80, '='))
+    run_local_jobs()
+
+    #PersistentSave can be called at any point to checkpoint session state.
+    #In here, we're just calling it at the end, as an example.
+    print("[ Saving the session ]".center(80, '='))
+    session_object.PersistentSave()
+
+
+def ask_for_outcome(prompt=None, allowed=None):
+    if prompt is None:
+        prompt = "what is the outcome? "
+    if allowed is None:
+        allowed = (IJobResult.OUTCOME_PASS, "p",
+                   IJobResult.OUTCOME_FAIL, "f",
+                   IJobResult.OUTCOME_SKIP, "s")
+    answer = None
+    while answer not in allowed:
+        print("Allowed answers are: {}".format(", ".join(allowed)))
+        answer = input(prompt)
+        # Useful shortcuts for testing
+        if answer == "f":
+            answer = IJobResult.OUTCOME_FAIL
+        if answer == "p":
+            answer = IJobResult.OUTCOME_PASS
+        if answer == "s":
+            answer = IJobResult.OUTCOME_SKIP
+    return answer
+
+
+def ask_for_test(prompt=None, allowed=None):
+    if prompt is None:
+        prompt = "Run the test command? "
+    if allowed is None:
+        allowed = ("y",
+                   "n",
+                   )
+    answer = None
+    while answer not in allowed:
+        print("Allowed answers are: {}".format(", ".join(allowed)))
+        answer = input(prompt)
+    return answer
+
+def ask_for_resume():
+    prompt = "Do you want to resume the previous session [Y/n]? "
+    allowed = ('', 'y', 'Y', 'n', 'N')
+    answer = None
+    while answer not in allowed:
+        answer = input(prompt)
+    return False if answer in ('n', 'N') else True
+
+
+# Asynchronous calls need reply handlers
+def handle_export_reply(s):
+    print("Export to buffer: I got {} bytes of export data".format(len(s)))
+    maybe_quit_after_export()
+
+def handle_export_to_file_reply(s):
+    print("Export to file: completed to {}".format(s))
+    maybe_quit_after_export()
+
+def maybe_quit_after_export():
+    # Two asynchronous callbacks calling this may result in a race
+    # condition. Don't do this at home, use a semaphore or lock.
+    global exports_count
+    exports_count += 1
+    if exports_count >= 2:
+        loop.quit()
+
+def handle_error(e):
+    print(str(e))
+    loop.quit()
+
+
+def catchall_ask_for_outcome_signals_handler(current_runner_path):
+    global current_job_path
+    job_def_object = bus.get_object(
+        'com.canonical.certification.PlainBox1', current_job_path)
+    job_cmd = job_def_object.Get(
+        'com.canonical.certification.CheckBox.JobDefinition1',
+        'command')
+    job_runner_object = bus.get_object(
+        'com.canonical.certification.PlainBox1', current_runner_path)
+    if job_cmd:
+        run_test = ask_for_test()
+        if run_test == 'y':
+            job_runner_object.RunCommand()
+            return
+    outcome_from_command = job_runner_object.Get(
+            'com.canonical.certification.PlainBox.RunningJob1',
+            'outcome_from_command')
+    print("Return code from the command indicates: {} ".format(
+          outcome_from_command))
+    outcome = ask_for_outcome()
+    comments = 'Test plainbox comments'
+    job_runner_object.SetOutcome(
+        outcome,
+        comments,
+        dbus_interface='com.canonical.certification.PlainBox.RunningJob1')
+
+
+def catchall_io_log_generated_signals_handler(offset, name, data):
+    try:
+        print("(<{}:{:05}>) {}".format(
+            name, int(offset), data.decode('UTF-8').rstrip()))
+    except UnicodeDecodeError:
+        pass
+
+
+def catchall_local_job_result_available_signals_handler(job, result):
+    # XXX: check if the job path actually matches the current_job_path
+    # Update the session job state map and run new jobs
+    global session_object
+    session_object.UpdateJobResult(
+        job, result,
+        reply_handler=run_local_jobs,
+        error_handler=handle_error,
+        dbus_interface='com.canonical.certification.PlainBox.Session1')
+
+
+def catchall_job_result_available_signals_handler(job, result):
+    # XXX: check if the job path actually matches the current_job_path
+    # Update the session job state map and run new jobs
+    global session_object
+    session_object.UpdateJobResult(
+        job, result,
+        reply_handler=run_jobs,
+        error_handler=handle_error,
+        dbus_interface='com.canonical.certification.PlainBox.Session1')
+
+
+def run_jobs():
+    global run_list
+    #Now the actual run, job by job.
+    if run_list:
+        job_path = run_list.pop(0)
+        global current_job_path
+        global session_object_path
+        current_job_path = job_path
+        job_def_object = bus.get_object(
+            'com.canonical.certification.PlainBox', current_job_path)
+        job_name = job_def_object.Get(
+            'com.canonical.certification.PlainBox.JobDefinition1', 'name')
+        job_desc = job_def_object.Get(
+            'com.canonical.certification.PlainBox.JobDefinition1',
+            'description')
+        print("[ {} ]".format(job_name).center(80, '-'))
+        if job_desc:
+            print(job_desc)
+            print("^" * len(job_desc.splitlines()[-1]))
+            print()
+        service.RunJob(session_object_path, job_path)
+    else:
+        show_results()
+
+
+def run_local_jobs():
+    global run_list
+    global desired_job_list
+    global whitelist
+    if run_list:
+        job_path = run_list.pop(0)
+        global current_job_path
+        global session_object_path
+        current_job_path = job_path
+        job_def_object = bus.get_object(
+            'com.canonical.certification.PlainBox1', current_job_path)
+        job_name = job_def_object.Get(
+            'com.canonical.certification.PlainBox.JobDefinition1', 'name')
+        job_desc = job_def_object.Get(
+            'com.canonical.certification.PlainBox.JobDefinition1',
+            'description')
+        print("[ {} ]".format(job_name).center(80, '-'))
+        if job_desc:
+            print(job_desc)
+        service.RunJob(session_object_path, job_path)
+    else:
+        #Now I update the desired job list to get jobs created from local jobs.
+        session_object.UpdateDesiredJobList(
+            desired_job_list,
+            dbus_interface='com.canonical.certification.PlainBox.Session1'
+        )
+        bus.add_signal_receiver(
+            catchall_ask_for_outcome_signals_handler,
+            dbus_interface="com.canonical.certification.PlainBox.Service1",
+            signal_name="AskForOutcome")
+
+        bus.add_signal_receiver(
+            catchall_io_log_generated_signals_handler,
+            dbus_interface="com.canonical.certification.PlainBox.Service1",
+            signal_name="IOLogGenerated",
+            byte_arrays=True)  # To easily convert the byte arrays to strings
+
+        # Replace the job result handler we created for local jobs for by the
+        # one dedicated to regular job types
+        bus.remove_signal_receiver(
+            catchall_local_job_result_available_signals_handler,
+            dbus_interface="com.canonical.certification.PlainBox.Service1",
+            signal_name="JobResultAvailable")
+
+        bus.add_signal_receiver(
+            catchall_job_result_available_signals_handler,
+            dbus_interface="com.canonical.certification.PlainBox.Service1",
+            signal_name="JobResultAvailable")
+
+        job_list = session_object.Get(
+            'com.canonical.certification.PlainBox.Session1',
+            'job_list'
+        )
+
+        #to get only the *jobs* that are designated by the whitelist.
+        desired_job_list = [
+            object for object in job_list if whitelist.Designates(
+                object,
+                dbus_interface=
+                'com.canonical.certification.PlainBox.WhiteList1')]
+
+        #Now I update the desired job list.
+        # XXX: Remove previous local jobs from this list to avoid evaluating
+        # them twice
+        session_object.UpdateDesiredJobList(
+            desired_job_list,
+            dbus_interface='com.canonical.certification.PlainBox.Session1'
+        )
+
+        #Now, the run_list contains the list of jobs I actually need to run \o/
+        run_list = session_object.Get(
+            'com.canonical.certification.PlainBox.Session1',
+            'run_list'
+        )
+
+        print("[ Running All Jobs ]".center(80, '='))
+        run_jobs()
+
+
+def show_results():
+    global session_object_path
+    session_object = bus.get_object(
+        'com.canonical.certification.PlainBox1',
+        session_object_path
+    )
+    job_state_map = session_object.Get(
+        'com.canonical.certification.PlainBox.Session1', 'job_state_map')
+    print("[ Results ]".center(80, '='))
+    for k, job_state_path in job_state_map.items():
+        job_state_object = bus.get_object(
+            'com.canonical.certification.PlainBox1',
+            job_state_path
+        )
+        # Get the job definition object and some properties
+        job_def_path = job_state_object.Get(
+            'com.canonical.certification.PlainBox.JobState1', 'job')
+        job_def_object = bus.get_object(
+            'com.canonical.certification.PlainBox1', job_def_path)
+        job_name = job_def_object.Get(
+            'com.canonical.certification.PlainBox.JobDefinition1', 'name')
+        # Ask for the via value (e.g. to compute job categories)
+        # if a job is a child of a local job
+        job_via = job_def_object.Get(
+            'com.canonical.certification.CheckBox.JobDefinition1', 'via')
+
+        # Get the current job result object and the outcome property
+        job_result_path = job_state_object.Get(
+            'com.canonical.certification.PlainBox.JobState1', 'result')
+        job_result_object = bus.get_object(
+            'com.canonical.certification.PlainBox1', job_result_path)
+        outcome = job_result_object.Get(
+            'com.canonical.certification.PlainBox.Result1', 'outcome')
+        comments = job_result_object.Get(
+            'com.canonical.certification.PlainBox.Result1', 'comments')
+        io_log = job_result_object.Get(
+            'com.canonical.certification.PlainBox.Result1',
+            'io_log', byte_arrays=True)
+
+        print("{:55s} {:15s} {}".format(job_name, outcome, comments))
+    export_session()
+
+def export_session():
+    service.ExportSessionToFile(
+        session_object_path,
+        "xml",
+        [''],
+        "/tmp/report.xml",
+        reply_handler=handle_export_to_file_reply,
+        error_handler=handle_error
+    )
+    # The exports will apparently run in parallel. The callbacks
+    # are responsible for ensuring exiting after this.
+    service.ExportSession(
+        session_object_path,
+        "xml",
+        [''],
+        reply_handler=handle_export_reply,
+        error_handler=handle_error
+    )
+
+# Start the first call after a short delay
+GObject.timeout_add(5, main)
+loop = GObject.MainLoop()
+loop.run()
+
+# Stop the Plainbox dbus service
+service.Exit()

=== added file 'plainbox/docs/author/checkbox-job-format.rst'
--- plainbox/docs/author/checkbox-job-format.rst	1970-01-01 00:00:00 +0000
+++ plainbox/docs/author/checkbox-job-format.rst	2013-09-13 17:12:45 +0000
@@ -0,0 +1,168 @@
+===================================
+Checkbox job file format and fields
+===================================
+
+This file contains NO examples, this is on purpose since the jobs
+directory contains several hundred examples showcasing all the features
+described here.
+
+File format and location
+------------------------
+Jobs are expressed as sections in text files that conform somewhat to
+the rfc822 specification format. Each section defines a single job. The
+section is delimited with an empty newline. Within each section, each
+field starts with the field name, a colon, a space and then the field
+contents. Multiple-line fields can be input by having a newline right
+after the colon, and then entering text lines after that, each line
+should start with at least one space.
+
+Fields that can be used on a job
+--------------------------------
+:name:
+    (mandatory) - A name for the job. Should be unique, an error will
+    be generated if there are duplicates. Should contain characters in 
+    [a-z0-9/-].
+    
+:plugin:
+
+    (mandatory) - For historical reasons it's called "plugin" but it's
+    better thought of as describing the "type" of job. The allowed types
+    are:
+
+     :manual: jobs that require the user to perform an action and then
+          decide on the test's outcome.
+     :shell: jobs that run without user intervention and
+         automatically set the test's outcome.
+     :user-interact: jobs that require the user to perform an
+         interaction, after which the outcome is automatically set.
+     :user-verify: jobs that automatically perform an action or test
+         and then request the user to decide on the test's outcome.
+     :attachment: jobs whose command output will be attached to the
+         test report or submission.
+     :local: a job whose command output needs to be in :term:`CheckBox` job
+         format. Jobs output by a local job will be added to the set of
+         available jobs to be run.
+     :resource: A job whose command output results in a set of rfc822
+          records, containing key/value pairs, and that can be used in other
+          jobs' ``requires`` expressions.
+
+:requires:
+    (optional). If specified, the job will only run if the conditions
+    expressed in this field are met.
+
+    Conditions are of the form ``<resource>.<key> <comparison-operator>
+    'value' (and|or) ...`` . Comparison operators can be ==, != and ``in``.
+    Values to compare to can be scalars or (in the case of the ``in``
+    operator) arrays or tuples. The ``not in`` operator is explicitly
+    unsupported.
+    
+    Requirements can be logically chained with ``or`` and
+    ``and`` operators. They can also be placed in multiple lines,
+    respecting the rfc822 multi-line syntax, in which case all
+    requirements must be met for the job to run ( ``and`` ed).
+    
+    The :term:`PlainBox` resource program evaluator is extensively documented,
+    to see a detailed description including rationale and implementation of
+    :term:`CheckBox` "legacy" compatibility, see :ref:`Resources in Plainbox
+    <resources>`.
+
+:depends:
+    (optional). If specified, the job will only run if all the listed
+    jobs have run and passed. Multiple job names, separated by spaces,
+    can be specified.
+
+:command:
+    (optional). A command can be provided, to be executed under specific
+    circumstances. For ``manual``, ``user-interact`` and ``user-verify``
+    jobs, the command will be executed when the user presses a "test"
+    button present in the user interface. For ``shell`` jobs, the
+    command will be executed unconditionally as soon as the job is
+    started. In both cases the exit code from the command (0 for
+    success, !0 for failure) will be used to set the test's outcome. For
+    ``manual``, ``user-interact`` and ``user-verify`` jobs, the user can
+    override the command's outcome.  The command will be run using the
+    default system shell. If a specific shell is needed it should be
+    instantiated in the command. A multi-line command or shell script
+    can be used with the usual multi-line syntax.
+
+    Note that a ``shell`` job without a command will do nothing.
+
+:description:
+    (mandatory). Provides a textual description for the job. This is
+    mostly to aid people reading job descriptions in figuring out what a
+    job does. 
+    
+    The description field, however, is used specially in ``manual``,
+    ``user-interact`` and ``user-verify`` jobs. For these jobs, the
+    description will be shown in the user interface, and in these cases
+    it's expected to contain instructions for the user to follow, as
+    well as criteria for him to decide whether the job passes or fails.
+    For these types of jobs, the description needs to contain a few
+    sub-fields, in order:
+
+    :PURPOSE: This indicates the purpose or intent of the test.
+    :STEPS: A numbered list of steps for the user to follow.
+    :INFO:
+        (optional). Additional information about the test. This is
+        commonly used to present command output for the user to validate.
+        For this purpose, the ``$output`` substitution variable can be used
+        (actually, it can be used anywhere in the description). If present,
+        it will be replaced by the standard output generated from running
+        the job's command (commonly when the user presses the "Test"
+        button).
+    :VERIFICATION:
+        A question for the user to answer, deciding whether the test
+        passes or fails. The question should be phrased in such a way
+        that an answer of **Yes** means the test passed, and an answer of
+        **No** means it failed.
+:user:
+    (optional). If specified, the job will be run as the user specified
+    here. This is most commonly used to run jobs as the superuser
+    (root).
+
+:environ:
+    (optional). If specified, the listed environment variables
+    (separated by spaces) will be taken from the invoking environment
+    (i.e. the one :term:`CheckBox` is run under) and set to that value on the
+    job execution environment (i.e.  the one the job will run under).
+    Note that only the *variable names* should be listed, not the
+    *values*, which will be taken from the existing environment. This
+    only makes sense for jobs that also have the ``user`` attribute.
+    This key provides a mechanism to account for security policies in
+    ``sudo`` and ``pkexec``, which provide a sanitized execution
+    environment, with the downside that useful configuration specified
+    in environment variables may be lost in the process.
+
+:estimated_duration:
+    (optional) This field contains metadata about how long the job is
+    expected to run for, as a positive float value indicating
+    the estimated job duration in seconds.
+
+===========================
+Extension of the job format
+===========================
+
+The :term:`CheckBox` job format can be considered "extensible", in that
+additional keys can be added to existing jobs to contain additional
+data that may be needed.
+
+In order for these extra fields to be exposed through the API (i.e. as
+properties of JobDefinition instances), they need to be declared as
+properties in (:mod:`plainbox.impl.job`). This is a good place to document,
+via a docstring, what the field is for and how to interpret it.
+
+Implementation note: if additional fields are added, *:term:`CheckBox`* needs
+to be also told about them, the reason is that :term:`CheckBox` *does* perform
+validation of the job descriptions, ensuring they contain only known fields and
+that fields contain expected data types. The jobs_info plugin contains the job
+schema declaration and can be consulted to verify the known fields, whether
+they are optional or mandatory, and the type of data they're expected to
+contain.
+
+Also, :term:`CheckBox` validates that fields contain data of a specific type,
+so care must be taken not to simply change contents of fields if
+:term:`CheckBox` compatibility of jobs is desired.
+
+:term:`PlainBox` does this validation on a per-accessor basis, so data in each
+field must make sense as defined by that field's accessor. There is no need,
+however, to declare field type beforehand.

=== modified file 'plainbox/docs/author/index.rst'
--- plainbox/docs/author/index.rst	2013-03-25 16:40:57 +0000
+++ plainbox/docs/author/index.rst	2013-09-13 17:12:45 +0000
@@ -11,6 +11,9 @@
     is a guiding point for subsequent editions that will expand and provide
     real value.
 
+.. toctree::
+   checkbox-job-format.rst 
+
 Personas and stories
 --------------------
 

=== modified file 'plainbox/docs/dev/reference.rst'
--- plainbox/docs/dev/reference.rst	2013-05-14 09:04:30 +0000
+++ plainbox/docs/dev/reference.rst	2013-09-13 17:12:45 +0000
@@ -94,6 +94,11 @@
     :undoc-members:
     :show-inheritance:
 
+.. automodule:: plainbox.impl.exporter.html
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 .. automodule:: plainbox.impl.secure
     :members:
     :undoc-members:
@@ -144,11 +149,6 @@
     :undoc-members:
     :show-inheritance:
 
-.. automodule:: plainbox.impl.mock_job
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 .. automodule:: plainbox.impl.resource
     :members:
     :undoc-members:
@@ -173,6 +173,43 @@
     :undoc-members:
     :show-inheritance:
 
+.. automodule:: plainbox.impl.session.state
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+.. automodule:: plainbox.impl.session.jobs
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+.. automodule:: plainbox.impl.session.storage
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+.. automodule:: plainbox.impl.session.suspend
+    :members:
+    :undoc-members:
+    :private-members:
+    :show-inheritance:
+
+.. automodule:: plainbox.impl.session.resume
+    :members:
+    :undoc-members:
+    :private-members:
+    :show-inheritance:
+
+.. automodule:: plainbox.impl.session.legacy
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+.. automodule:: plainbox.impl.session.manager
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 .. automodule:: plainbox.impl.testing_utils
     :members:
     :undoc-members:

=== modified file 'plainbox/docs/dev/resources.rst'
--- plainbox/docs/dev/resources.rst	2013-05-11 13:56:36 +0000
+++ plainbox/docs/dev/resources.rst	2013-09-13 17:12:45 +0000
@@ -1,3 +1,5 @@
+.. _resources:
+
 Resources
 =========
 

=== modified file 'plainbox/plainbox/abc.py'
--- plainbox/plainbox/abc.py	2013-02-25 11:02:58 +0000
+++ plainbox/plainbox/abc.py	2013-09-13 17:12:45 +0000
@@ -114,13 +114,46 @@
     # XXX: We could also store stuff like job duration and other meta-data but
     # I wanted to avoid polluting this proposal with mundane details
 
-    @abstractproperty
-    def job(self):
-        """
-        Definition of the job
+    # The outcome of a job is a one-word classification of how it ran.  There
+    # are several values that were not used in the original implementation but
+    # their existence helps to organize and implement plainbox. They are
+    # discussed below to make their intended meaning more detailed than is
+    # possible from the variable name alone.
+    #
+    # The None outcome - a job that basically did not run at all.
+    OUTCOME_NONE = None
+    # The pass and fail outcomes are the two most essential, and externally
+    # visible, job outcomes. They can be provided by either automated or manual
+    # "classifier" - a script or a person that clicks a "pass" or "fail"
+    # button.
+    OUTCOME_PASS = 'pass'
+    OUTCOME_FAIL = 'fail'
+    # The skip outcome is used when the operator selected a job but then
+    # skipped it. This is typically used for a manual job that is tedious or
+    # was selected by accident.
+    OUTCOME_SKIP = 'skip'
+    # The not supported outcome is used when a job was about to run but a
+    # dependency or resource requirement prevent it from running.  XXX: perhaps
+    # this should be called "not available", not supported has the "unsupported
+    # code" feeling associated with it.
+    OUTCOME_NOT_SUPPORTED = 'not-supported'
+    # A temporary state that should be removed later on, used to indicate that
+    # job runner is not implemented but the job "ran" so to speak.
+    OUTCOME_NOT_IMPLEMENTED = 'not-implemented'
+    # A temporary state before the user decides on the outcome of a manual
+    # job or any other job that requires manual verification
+    OUTCOME_UNDECIDED = 'undecided'
 
-        The object implements IJobDefinition
-        """
+    # List of all valid values of OUTCOME_xxx
+    ALL_OUTCOME_LIST = [
+        OUTCOME_NONE,
+        OUTCOME_PASS,
+        OUTCOME_FAIL,
+        OUTCOME_SKIP,
+        OUTCOME_NOT_SUPPORTED,
+        OUTCOME_NOT_IMPLEMENTED,
+        OUTCOME_UNDECIDED,
+    ]
 
     @abstractproperty
     def outcome(self):
@@ -204,3 +237,79 @@
         May raise NotImplementedError if the user interface cannot provide this
         answer.
         """
+
+
+class IProviderBackend1(metaclass=ABCMeta):
+    """
+    Provider for the current type of tests.
+
+    This class provides the APIs required by the internal implementation
+    that are not considered normal public APIs. The only consumer of
+    those methods and properties is internal to plainbox.
+    """
+
+    @abstractproperty
+    def CHECKBOX_SHARE(self):
+        """
+        Return the required value of CHECKBOX_SHARE environment variable.
+
+        .. note::
+            This variable is only required by one script.
+            It would be nice to remove this later on.
+        """
+
+    @abstractproperty
+    def extra_PYTHONPATH(self):
+        """
+        Return additional entry for PYTHONPATH, if needed.
+
+        This entry is required for CheckBox scripts to import the correct
+        CheckBox python libraries.
+
+        .. note::
+            The result may be None
+        """
+
+    @abstractproperty
+    def extra_PATH(self):
+        """
+        Return additional entry for PATH
+
+        This entry is required to lookup CheckBox scripts.
+        """
+
+
+class IProvider1(metaclass=ABCMeta):
+    """
+    Provider for the current type of tests
+
+    Also known as the 'checkbox-like' provider.
+    """
+
+    @abstractproperty
+    def name(self):
+        """
+        name of this provider
+
+        This name should be dbus-friendly. It should not be localizable.
+        """
+
+    @abstractproperty
+    def description(self):
+        """
+        description of this provider
+
+        This description should be dbus-friendly. It should not be localizable.
+        """
+
+    @abstractmethod
+    def get_builtin_jobs(self):
+        """
+        Load all the built-in jobs and return them
+        """
+
+    @abstractmethod
+    def get_builtin_whitelists(self):
+        """
+        Load all the built-in whitelists and return them
+        """

=== added file 'plainbox/plainbox/data/report/checkbox.js'
--- plainbox/plainbox/data/report/checkbox.js	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/data/report/checkbox.js	2013-09-13 17:12:45 +0000
@@ -0,0 +1,16 @@
+function showHide(what) {
+    var heading = document.getElementById(what);
+    var contents = document.getElementById(what + "-contents");
+    var headingcontents = heading.innerHTML;
+    var newcontents;
+
+    if (contents.style.display != "block") {
+        newcontents = headingcontents.replace("closed", "open");
+        contents.style.display = "block";
+    } else {
+        newcontents = headingcontents.replace("open", "closed");
+        contents.style.display = "none";
+    }
+
+    heading.innerHTML = newcontents;
+}

=== added directory 'plainbox/plainbox/data/report/images'
=== added file 'plainbox/plainbox/data/report/images/body_bg.png'
Binary files plainbox/plainbox/data/report/images/body_bg.png	1970-01-01 00:00:00 +0000 and plainbox/plainbox/data/report/images/body_bg.png	2013-09-13 17:12:45 +0000 differ
=== added file 'plainbox/plainbox/data/report/images/bullet.png'
Binary files plainbox/plainbox/data/report/images/bullet.png	1970-01-01 00:00:00 +0000 and plainbox/plainbox/data/report/images/bullet.png	2013-09-13 17:12:45 +0000 differ
=== added file 'plainbox/plainbox/data/report/images/closed.png'
Binary files plainbox/plainbox/data/report/images/closed.png	1970-01-01 00:00:00 +0000 and plainbox/plainbox/data/report/images/closed.png	2013-09-13 17:12:45 +0000 differ
=== added file 'plainbox/plainbox/data/report/images/fail.png'
Binary files plainbox/plainbox/data/report/images/fail.png	1970-01-01 00:00:00 +0000 and plainbox/plainbox/data/report/images/fail.png	2013-09-13 17:12:45 +0000 differ
=== added file 'plainbox/plainbox/data/report/images/header_bg.png'
Binary files plainbox/plainbox/data/report/images/header_bg.png	1970-01-01 00:00:00 +0000 and plainbox/plainbox/data/report/images/header_bg.png	2013-09-13 17:12:45 +0000 differ
=== added file 'plainbox/plainbox/data/report/images/open.png'
Binary files plainbox/plainbox/data/report/images/open.png	1970-01-01 00:00:00 +0000 and plainbox/plainbox/data/report/images/open.png	2013-09-13 17:12:45 +0000 differ
=== added file 'plainbox/plainbox/data/report/images/pass.png'
Binary files plainbox/plainbox/data/report/images/pass.png	1970-01-01 00:00:00 +0000 and plainbox/plainbox/data/report/images/pass.png	2013-09-13 17:12:45 +0000 differ
=== added file 'plainbox/plainbox/data/report/images/skip.png'
Binary files plainbox/plainbox/data/report/images/skip.png	1970-01-01 00:00:00 +0000 and plainbox/plainbox/data/report/images/skip.png	2013-09-13 17:12:45 +0000 differ
=== added file 'plainbox/plainbox/data/report/styles.css'
--- plainbox/plainbox/data/report/styles.css	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/data/report/styles.css	2013-09-13 17:12:45 +0000
@@ -0,0 +1,258 @@
+body {
+    font-family: "Ubuntu Beta", "Bitstream Vera Sans", DejaVu Sans, Tahoma, sans-serif;
+    color: #333;
+    background: white url(report/images/body_bg.png);
+    font-size: 12px;
+    line-height: 14px;
+    margin: 0px;
+    padding: 0px;
+}
+#container {
+    background: #f7f6f5;
+    margin: 0px auto 20px;
+    padding: 0px;
+    width: 976px;
+}
+#container-inner {
+    background-color: #dfdcd9;
+}
+#header, #container-inner {
+    -moz-border-radius: 0px 0px 5px 5px;
+    -webkit-border-bottom-left-radius: 5px;
+    -webkit-border-bottom-right-radius: 5px;
+    -moz-box-shadow: #bbb 0px 0px 5px;
+    -webkit-box-shadow: #bbb 0px 0px 5px;
+}
+#header {
+    background: #dd4814 url(report/images/header_bg.png) top left repeat-x;
+    height: 64px;
+    margin: 0px;
+    padding: 0px;
+    position: relative;
+}
+
+#menu-search {
+    height: 40px;
+    margin: 0 16px;
+}
+
+#title {
+    padding: 28px 24px;
+}
+
+#content {
+    /*padding: 32px 80px 32px 80px;*/
+    padding: 32px 240px 32px 160px;
+    margin: 0 16px 16px;
+    width: 544px;
+    background-color: #fff;
+    -moz-border-radius: 4px;
+    -webkit-border-radius: 4px;
+}
+#end-content {
+    clear: both;
+}
+
+#content-panel {
+    width: 446px;
+    margin: 0px 0px 0px 0px;
+    padding: 8px 8px 32px 8px;
+    background-color: #fff;
+    -moz-border-radius: 4px;
+    -webkit-border-radius: 4px;
+}
+
+#copyright {
+    background-position: 803px 40px;
+    background-repeat: no-repeat;
+    text-align: center;
+    margin: 0 16px;
+    padding: 40px 0 0 0;
+    height: 32px;
+}
+#copyright p {
+    color: #aea79f;
+    font-size: 10px;
+    line-height: 14px;
+    margin: 2px 0;
+}
+
+#footer {
+    padding-top: 16px;
+}
+#footer * {
+    font-size: 10px;
+    line-height: 14px;
+}
+#footer p {
+    margin: 0;
+    padding-bottom: 3px;
+    border-bottom: 1px dotted #aea79f;
+}
+#footer p.footer-title {
+    font-weight: bold;
+}
+#footer .footer-div {
+    width: 144px;
+    float: left;
+    margin-left: 16px;
+}
+#footer .last-div {
+    margin-right: 16px;
+}
+#footer ul {
+    list-style: none;
+    margin: 0;
+    padding: 0;
+}
+#footer li {
+    margin: 0;
+    padding: 3px 0;
+    border-bottom: 1px dotted #aea79f;
+}
+
+h1, h2, h3, h4, h5 {
+    padding: 0;
+    margin: 0;
+    font-weight: normal;
+}
+h1 {
+    font-size: 36px;
+    line-height: 40px;
+    color: #dd4814;
+}
+h2 {
+    font-size: 24px;
+    line-height: 28px;
+    margin-bottom: 8px;
+}
+h3 {
+    font-size: 16px;
+    line-height: 20px;
+    margin-bottom: 8px;
+}
+h3.link-other {
+    color: #333;
+}
+h3.link-services {
+    color: #fff;
+}
+h4 {
+    font-size: 12px;
+    line-height: 14px;
+}
+h4.partners {
+    color: #333;
+    font-size: 16px;
+    line-height: 20px;
+}
+h5 {
+    color: #333;
+    font-size: 10px;
+    line-height: 14px;
+}
+h1 span.grey, h2 span.grey, h1 span, h2 span{
+    color: #aea79f;
+}
+p {
+    font-size: 12px;
+    line-height: 14px;
+    margin-bottom: 8px;
+}
+strong {
+    font-weight: bold;
+}
+
+a {
+    color: #333;
+    text-decoration: none;
+}
+a:hover {
+    color: #dd4814;
+    text-decoration: underline;
+}
+div.footer-div:hover a, div#content:hover a {
+    color: #dd4814;
+    text-decoration: none;
+}
+div.footer-div:hover a:hover, div#content:hover a:hover {
+    color: #dd4814;
+    text-decoration: underline;
+}
+
+ul {
+    margin-bottom: 16px;
+    list-style-image: url(report/images/bullet.png);
+}
+ul li {
+    margin-bottom: 8px;
+    line-height: 14px;
+}
+ul li:last-child {
+    margin-bottom: 0px;
+}
+
+p.call-to-action {
+    color: #333;
+}
+p.case-study {
+    color: #333;
+}
+p.highlight {
+    font-size: 16px;
+    line-height: 20px;
+}
+p.introduction {
+    color: #333;
+    font-size: 16px;
+    line-height: 20px;
+}
+p.services {
+    color: #fff;
+}
+p.small-text {
+    color: #333;
+    font-size: 10px;
+}
+
+/* Clearing floats without extra markup
+Based on How To Clear Floats Without Structural Markup by PiE
+[http://www.positioniseverything.net/easyclearing.html] */
+.clearfix:after {
+    content: ".";
+    display: block;
+    height: 0;
+    clear: both;
+    visibility: hidden;
+}
+.clearfix {
+    -moz-border-radius: 5px 5px 5px 5px;
+    -webkit-border-bottom-top-radius: 5px;
+    -webkit-border-bottom-left-radius: 5px;
+    -webkit-border-bottom-bottom-radius: 5px;
+    -webkit-border-bottom-right-radius: 5px;
+    -moz-box-shadow: #bbb 0px 0px 5px;
+    -webkit-box-shadow: #bbb 0px 0px 5px;
+    display: inline-block;
+}  /* for IE/Mac */
+td
+{
+    margin: 0;
+    padding-bottom: 3px;
+    border-bottom: 1px dotted #aea79f;
+    font-size: 10px;
+    line-height: 14px;
+}
+.resultimg
+{
+    height: 12px;
+}
+.disclosureimg
+{
+	height:		.75em;
+	vertical-align:	middle;
+}
+.data
+{
+	display:	none;
+}

=== modified file 'plainbox/plainbox/impl/applogic.py'
--- plainbox/plainbox/impl/applogic.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/applogic.py	2013-09-13 17:12:45 +0000
@@ -30,8 +30,9 @@
 import os
 import re
 
+from plainbox.abc import IJobResult
 from plainbox.impl import config
-from plainbox.impl.result import JobResult
+from plainbox.impl.result import MemoryJobResult
 
 
 class IJobQualifier(metaclass=ABCMeta):
@@ -66,6 +67,13 @@
         self._pattern = re.compile(pattern)
         self._pattern_text = pattern
 
+    @property
+    def pattern_text(self):
+        """
+        text of the regular expression embedded in this qualifier
+        """
+        return self._pattern_text
+
     def designates(self, job):
         return self._pattern.match(job.name)
 
@@ -113,6 +121,86 @@
         return False
 
 
+# NOTE: using CompositeQualifier seems strange but it's a tested proven
+# component so all we have to ensure is that we read the whitelist files
+# correctly.
+class WhiteList(CompositeQualifier):
+    """
+    A qualifier that understands checkbox whitelist files.
+
+    A whitelist file is a plain text, line oriented file. Each line represents
+    a regular expression pattern that can be matched against the name of a job.
+
+    The file can contain simple shell-style comments that begin with the pound
+    or hash key (#). Those are ignored. Comments can span both a fraction of a
+    line as well as the whole line.
+
+    For historical reasons each pattern has an implicit '^' and '$' prepended
+    and appended (respectively) to the actual pattern specified in the file.
+    """
+
+    def __init__(self, pattern_list, name=None):
+        """
+        Initialize a whitelist object with the specified list of patterns.
+
+        The patterns must be already mangled with '^' and '$'.
+        """
+        inclusive = [RegExpJobQualifier(pattern) for pattern in pattern_list]
+        exclusive = ()
+        super(WhiteList, self).__init__(inclusive, exclusive)
+        self._name = name
+
+    def __repr__(self):
+        return "<{} name:{!r}>".format(self.__class__.__name__, self.name)
+
+    @property
+    def name(self):
+        """
+        name of this WhiteList (might be None)
+        """
+        return self._name
+
+    @classmethod
+    def from_file(cls, pathname):
+        """
+        Load and initialize the WhiteList object from the specified file.
+
+        :param pathname: file to load
+        :returns: a fresh WhiteList object
+        """
+        pattern_list = cls._load_patterns(pathname)
+        name = os.path.splitext(os.path.basename(pathname))[0]
+        return cls(pattern_list, name=name)
+
+    @classmethod
+    def _load_patterns(self, pathname):
+        """
+        Load whitelist patterns from the specified file
+        """
+        pattern_list = []
+        # Load the file
+        with open(pathname, "rt", encoding="UTF-8") as stream:
+            for line in stream:
+                # Strip shell-style comments if there are any
+                try:
+                    index = line.index("#")
+                except ValueError:
+                    pass
+                else:
+                    line = line[:index]
+                # Strip whitespace
+                line = line.strip()
+                # Skip empty lines (especially after stripping comments)
+                if line == "":
+                    continue
+                # Surround the pattern with ^ and $
+                # so that it won't just match a part of the job name.
+                regexp_pattern = r"^{pattern}$".format(pattern=line)
+                # Accumulate patterns into the list
+                pattern_list.append(regexp_pattern)
+        return pattern_list
+
+
 def get_matching_job_list(job_list, qualifier):
     """
     Get a list of jobs that are designated by the specified qualifier.
@@ -137,16 +225,15 @@
         # OUTCOME_NOT_SUPPORTED _except_ if any of the inhibitors point to
         # a job with an OUTCOME_SKIP outcome, if that is the case mirror
         # that outcome. This makes 'skip' stronger than 'not-supported'
-        outcome = JobResult.OUTCOME_NOT_SUPPORTED
+        outcome = IJobResult.OUTCOME_NOT_SUPPORTED
         for inhibitor in job_state.readiness_inhibitor_list:
             if inhibitor.cause != inhibitor.FAILED_DEP:
                 continue
             related_job_state = session.job_state_map[
                 inhibitor.related_job.name]
-            if related_job_state.result.outcome == JobResult.OUTCOME_SKIP:
-                outcome = JobResult.OUTCOME_SKIP
-        job_result = JobResult({
-            'job': job,
+            if related_job_state.result.outcome == IJobResult.OUTCOME_SKIP:
+                outcome = IJobResult.OUTCOME_SKIP
+        job_result = MemoryJobResult({
             'outcome': outcome,
             'comments': job_state.get_readiness_description()
         })
@@ -177,9 +264,20 @@
         section="sru",
         help_text="Location of the fallback file")
 
+    whitelist = config.Variable(
+        section="sru",
+        help_text="Optional whitelist with which to run SRU testing")
+
     environment = config.Section(
         help_text="Environment variables for scripts and jobs")
 
+    default_provider = config.Variable(
+        section="common",
+        help_text="Name of the default provider to use",
+        validator_list=[
+            config.ChoiceValidator(['auto', 'src', 'deb', 'stub', 'ihv'])],
+        default="auto")
+
     class Meta:
 
         # TODO: properly depend on xdg and use real code that also handles

=== modified file 'plainbox/plainbox/impl/box.py'
--- plainbox/plainbox/impl/box.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/box.py	2013-09-13 17:12:45 +0000
@@ -26,55 +26,28 @@
     THIS MODULE DOES NOT HAVE STABLE PUBLIC API
 """
 
-import argparse
-import errno
 import logging
-import pdb
-import sys
 
 from plainbox import __version__ as version
 from plainbox.impl.applogic import PlainBoxConfig
-from plainbox.impl.checkbox import CheckBox
+from plainbox.impl.commands import PlainBoxToolBase
 from plainbox.impl.commands.check_config import CheckConfigCommand
 from plainbox.impl.commands.dev import DevCommand
 from plainbox.impl.commands.run import RunCommand
 from plainbox.impl.commands.selftest import SelfTestCommand
+from plainbox.impl.commands.service import ServiceCommand
 from plainbox.impl.commands.sru import SRUCommand
-from plainbox.impl.logging import setup_logging, adjust_logging
+from plainbox.impl.logging import setup_logging
 
 
 logger = logging.getLogger("plainbox.box")
 
 
-class PlainBox:
+class PlainBoxTool(PlainBoxToolBase):
     """
     Command line interface to PlainBox
     """
 
-    def __init__(self):
-        """
-        Initialize all the variables, real stuff happens in main()
-        """
-        self._early_parser = None  # set in _early_init()
-        self._config = None  # set in _late_init()
-        self._checkbox = None  # set in _late_init()
-        self._parser = None  # set in _late_init()
-
-    def main(self, argv=None):
-        """
-        Run as if invoked from command line directly
-        """
-        self.early_init()
-        early_ns = self._early_parser.parse_args(argv)
-        self.late_init(early_ns)
-        logger.debug("parsed early namespace: %s", early_ns)
-        # parse the full command line arguments, this is also where we
-        # do argcomplete-dictated exit if bash shell completion is requested
-        ns = self._parser.parse_args(argv)
-        logger.debug("parsed full namespace: %s", ns)
-        self.final_init(ns)
-        return self.dispatch_and_catch_exceptions(ns)
-
     @classmethod
     def get_config_cls(cls):
         """
@@ -107,221 +80,17 @@
         top-level subcommands.
         """
         # TODO: switch to plainbox plugins
-        RunCommand(self._checkbox).register_parser(subparsers)
+        RunCommand(self._provider).register_parser(subparsers)
         SelfTestCommand().register_parser(subparsers)
-        SRUCommand(self._checkbox, self._config).register_parser(subparsers)
+        SRUCommand(self._provider, self._config).register_parser(subparsers)
         CheckConfigCommand(self._config).register_parser(subparsers)
-        DevCommand(self._checkbox, self._config).register_parser(subparsers)
-
-    def early_init(self):
-        """
-        Do very early initialization. This is where we initalize stuff even
-        without seeing a shred of command line data or anything else.
-        """
-        self._early_parser = self.construct_early_parser()
-
-    def late_init(self, early_ns):
-        """
-        Initialize with early command line arguments being already parsed
-        """
-        adjust_logging(
-            level=early_ns.log_level, trace_list=early_ns.trace,
-            debug_console=early_ns.debug_console)
-        # Load plainbox configuration
-        self._config = self.get_config_cls().get()
-        # Load and initialize checkbox provider
-        # TODO: rename to provider, switch to plugins
-        self._checkbox = CheckBox(
-            mode=None if early_ns.checkbox == 'auto' else early_ns.checkbox)
-        # Construct the full command line argument parser
-        self._parser = self.construct_parser()
-
-    def final_init(self, ns):
-        """
-        Do some final initialization just before the command gets
-        dispatched. This is empty here but maybe useful for subclasses.
-        """
-
-    def construct_early_parser(self):
-        """
-        Create a parser that captures some of the early data we need to
-        be able to have a real parser and initialize the rest.
-        """
-        parser = argparse.ArgumentParser(add_help=False)
-        # Fake --help and --version
-        parser.add_argument("-h", "--help", action="store_const", const=None)
-        parser.add_argument("--version", action="store_const", const=None)
-        self.add_early_parser_arguments(parser)
-        # A catch-all net for everything else
-        parser.add_argument("rest", nargs="...")
-        return parser
-
-    def construct_parser(self):
-        parser = argparse.ArgumentParser(prog=self.get_exec_name())
-        parser.add_argument(
-            "--version", action="version", version=self.get_exec_version())
-        # Add all the things really parsed by the early parser so that it
-        # shows up in --help and bash tab completion.
-        self.add_early_parser_arguments(parser)
-        subparsers = parser.add_subparsers()
-        self.add_subcommands(subparsers)
-        # Enable argcomplete if it is available.
-        try:
-            import argcomplete
-        except ImportError:
-            pass
-        else:
-            argcomplete.autocomplete(parser)
-        return parser
-
-    def add_early_parser_arguments(self, parser):
-        # Since we need a CheckBox instance to create the main argument parser
-        # and we need to be able to specify where Checkbox is, we parse that
-        # option alone before parsing everything else
-        # TODO: rename this to -p | --provider
-        parser.add_argument(
-            '-c', '--checkbox',
-            action='store',
-            # TODO: have some public API for this, pretty please
-            choices=list(CheckBox._DIRECTORY_MAP.keys()) + ['auto'],
-            default='auto',
-            help="where to find the installation of CheckBox.")
-        group = parser.add_argument_group(
-            title="logging and debugging")
-        # Add the --log-level argument
-        group.add_argument(
-            "-l", "--log-level",
-            action="store",
-            choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
-            default=None,
-            help=argparse.SUPPRESS)
-        # Add the --verbose argument
-        group.add_argument(
-            "-v", "--verbose",
-            dest="log_level",
-            action="store_const",
-            const="INFO",
-            help="be more verbose (same as --log-level=INFO)")
-        # Add the --debug flag
-        group.add_argument(
-            "-D", "--debug",
-            dest="log_level",
-            action="store_const",
-            const="DEBUG",
-            help="enable DEBUG messages on the root logger")
-        # Add the --debug flag
-        group.add_argument(
-            "-C", "--debug-console",
-            action="store_true",
-            help="display DEBUG messages in the console")
-        # Add the --trace flag
-        group.add_argument(
-            "-T", "--trace",
-            metavar="LOGGER",
-            action="append",
-            default=[],
-            help=("enable DEBUG messages on the specified logger "
-                  "(can be used multiple times)"))
-        # Add the --pdb flag
-        group.add_argument(
-            "-P", "--pdb",
-            action="store_true",
-            default=False,
-            help="jump into pdb (python debugger) when a command crashes")
-        # Add the --debug-interrupt flag
-        group.add_argument(
-            "-I", "--debug-interrupt",
-            action="store_true",
-            default=False,
-            help="crash on SIGINT/KeyboardInterrupt, useful with --pdb")
-
-    def dispatch_command(self, ns):
-        # Argh the horrror!
-        #
-        # Since CPython revision cab204a79e09 (landed for python3.3)
-        # http://hg.python.org/cpython/diff/cab204a79e09/Lib/argparse.py
-        # the argparse module behaves differently than it did in python3.2
-        #
-        # In practical terms subparsers are now optional in 3.3 so all of the
-        # commands are no longer required parameters.
-        #
-        # To compensate, on python3.3 and beyond, when the user just runs
-        # plainbox without specifying the command, we manually, explicitly do
-        # what python3.2 did: call parser.error(_('too few arguments'))
-        if (sys.version_info[:2] >= (3, 3)
-                and getattr(ns, "command", None) is None):
-            self._parser.error(argparse._("too few arguments"))
-        else:
-            return ns.command.invoked(ns)
-
-    def dispatch_and_catch_exceptions(self, ns):
-        try:
-            return self.dispatch_command(ns)
-        except SystemExit:
-            # Don't let SystemExit be caught in the logic below, we really
-            # just want to exit when that gets thrown.
-            logger.debug("caught SystemExit, exiting")
-            # We may want to raise SystemExit as it can carry a status code
-            # along and we cannot just consume that.
-            raise
-        except BaseException as exc:
-            logger.debug("caught %r, deciding on what to do next", exc)
-            # For all other exceptions (and I mean all), do a few checks
-            # and perform actions depending on the command line arguments
-            # By default we want to re-raise the exception
-            action = 'raise'
-            # We want to ignore IOErrors that are really EPIPE
-            if isinstance(exc, IOError):
-                if exc.errno == errno.EPIPE:
-                    action = 'ignore'
-            # We want to ignore KeyboardInterrupt unless --debug-interrupt
-            # was passed on command line
-            elif isinstance(exc, KeyboardInterrupt):
-                if ns.debug_interrupt:
-                    action = 'debug'
-                else:
-                    action = 'ignore'
-            else:
-                # For all other execptions, debug if requested
-                if ns.pdb:
-                    action = 'debug'
-            logger.debug("action for exception %r is %s", exc, action)
-            if action == 'ignore':
-                return 0
-            elif action == 'raise':
-                logging.getLogger("plainbox.crashes").fatal(
-                    "Executable %r invoked with %r has crashed",
-                    self.get_exec_name(), ns, exc_info=1)
-                raise
-            elif action == 'debug':
-                logger.error("caught runaway exception: %r", exc)
-                logger.error("starting debugger...")
-                pdb.post_mortem()
-                return 1
+        DevCommand(self._provider, self._config).register_parser(subparsers)
+        ServiceCommand(self._provider, self._config).register_parser(
+            subparsers)
 
 
 def main(argv=None):
-    # Another try/catch block for catching KeyboardInterrupt
-    # This one is really only meant for the early init abort
-    # (when someone runs main but bails out before we really
-    # get to the point when we do something useful and setup
-    # all the exception handlers).
-    try:
-        raise SystemExit(PlainBox().main(argv))
-    except KeyboardInterrupt:
-        pass
-
-
-def get_builtin_jobs():
-    raise NotImplementedError("get_builtin_jobs() not implemented")
-
-
-def save(something, somewhere):
-    raise NotImplementedError("save() not implemented")
-
-
-def run(*args, **kwargs):
-    raise NotImplementedError("run() not implemented")
+    raise SystemExit(PlainBoxTool().main(argv))
 
 
 # Setup logging before anything else starts working.

=== modified file 'plainbox/plainbox/impl/commands/__init__.py'
--- plainbox/plainbox/impl/commands/__init__.py	2013-02-25 11:02:58 +0000
+++ plainbox/plainbox/impl/commands/__init__.py	2013-09-13 17:12:45 +0000
@@ -27,11 +27,26 @@
 """
 
 from abc import abstractmethod, ABCMeta
+import argparse
+import errno
+import logging
+import pdb
+import sys
+
+from plainbox.impl.logging import adjust_logging
+from plainbox.impl.providers.v1 import all_providers
+
+
+logger = logging.getLogger("plainbox.commands")
 
 
 class PlainBoxCommand(metaclass=ABCMeta):
     """
-    Simple interface class for plainbox commands
+    Simple interface class for plainbox commands.
+
+    Command objects like this are consumed by PlainBoxTool subclasses to
+    implement hierarchical command system. The API supports arbitrary
+    many sub commands in arbitrary nesting arrangement.
     """
 
     @abstractmethod
@@ -49,3 +64,309 @@
         command. The subparsers argument is the return value of
         ArgumentParser.add_subparsers()
         """
+
+
+class PlainBoxToolBase(metaclass=ABCMeta):
+    """
+    Base class for implementing commands like 'plainbox'.
+
+    The tools support a variety of sub-commands, logging and debugging
+    support. If argcomplete module is available and used properly in
+    the shell then advanced tab-completion is also available.
+
+    There are three methods to implement for a basic tool. Those are:
+
+    1. :meth:`get_config_cls()` -- to know which config to use
+    2. :meth:`get_exec_name()` -- to know how the command will be called
+    3. :meth:`add_subcommands()` -- to add some actual commands to execute
+
+    This class has some complex control flow to support important
+    and interesting use cases. There are some concerns for people
+    who subclass this in order to implement their own command line tools.
+
+    The first concern is that input is parsed with two parsers, the early
+    parser and the full parser. The early parser quickly checks for a fraction
+    of supported arguments and uses that data to initialize environment
+    before construction of a full parser is possible. The full parser
+    sees the reminder of the input and does not re-parse things that where
+    already handled.
+
+    The second concern is that this command natively supports the concept
+    of a config object and a provider object. This may not be desired by
+    all users but it is the current state as of this writing. This means
+    that by the time early init is done we have a known provider and config
+    objects that can be used to instantiate command objects
+    in :meth:`add_subcommands()`. This API might change when full
+    multi-provider is available but details are not known yet.
+    """
+
+    def __init__(self):
+        """
+        Initialize all the variables, real stuff happens in main()
+        """
+        self._early_parser = None  # set in _early_init()
+        self._config = None  # set in _late_init()
+        self._provider = None  # set in _late_init()
+        self._parser = None  # set in _late_init()
+
+    def main(self, argv=None):
+        """
+        Run as if invoked from command line directly
+        """
+        # Another try/catch block for catching KeyboardInterrupt
+        # This one is really only meant for the early init abort
+        # (when someone runs main but bails out before we really
+        # get to the point when we do something useful and setup
+        # all the exception handlers).
+        try:
+            self.early_init()
+            early_ns = self._early_parser.parse_args(argv)
+            self.late_init(early_ns)
+            logger.debug("parsed early namespace: %s", early_ns)
+            # parse the full command line arguments, this is also where we
+            # do argcomplete-dictated exit if bash shell completion
+            # is requested
+            ns = self._parser.parse_args(argv)
+            logger.debug("parsed full namespace: %s", ns)
+            self.final_init(ns)
+        except KeyboardInterrupt:
+            pass
+        else:
+            return self.dispatch_and_catch_exceptions(ns)
+
+    @classmethod
+    @abstractmethod
+    def get_config_cls(cls):
+        """
+        Get the Config class that is used by this implementation.
+
+        This can be overridden by subclasses to use a different config class
+        that is suitable for the particular application.
+        """
+
+    @classmethod
+    @abstractmethod
+    def get_exec_name(cls):
+        """
+        Get the name of this executable
+        """
+
+    @classmethod
+    @abstractmethod
+    def get_exec_version(cls):
+        """
+        Get the version reported by this executable
+        """
+
+    @abstractmethod
+    def add_subcommands(self, subparsers):
+        """
+        Add top-level subcommands to the argument parser.
+
+        This can be overridden by subclasses to use a different set of
+        top-level subcommands.
+        """
+
+    def early_init(self):
+        """
+        Do very early initialization. This is where we initialize stuff even
+        without seeing a shred of command line data or anything else.
+        """
+        self._early_parser = self.construct_early_parser()
+
+    def late_init(self, early_ns):
+        """
+        Initialize with early command line arguments being already parsed
+        """
+        adjust_logging(
+            level=early_ns.log_level, trace_list=early_ns.trace,
+            debug_console=early_ns.debug_console)
+        # Load plainbox configuration
+        self._config = self.get_config_cls().get()
+        # Load and initialize checkbox provider
+        # TODO: rename to provider, switch to plugins
+        all_providers.load()
+        # If the default value of 'None' was set for the checkbox (provider)
+        # argument then load the actual provider name from the configuration
+        # object (default for that is 'auto').
+        if early_ns.checkbox is None:
+            early_ns.checkbox = self._config.default_provider
+        assert early_ns.checkbox in ('auto', 'src', 'deb', 'stub', 'ihv')
+        if early_ns.checkbox == 'auto':
+            provider_name = 'checkbox-auto'
+        elif early_ns.checkbox == 'src':
+            provider_name = 'checkbox-src'
+        elif early_ns.checkbox == 'deb':
+            provider_name = 'checkbox-deb'
+        elif early_ns.checkbox == 'stub':
+            provider_name = 'stubbox'
+        elif early_ns.checkbox == 'ihv':
+            provider_name = 'ihv'
+        self._provider = all_providers.get_by_name(
+            provider_name).plugin_object()
+        # Construct the full command line argument parser
+        self._parser = self.construct_parser()
+
+    def final_init(self, ns):
+        """
+        Do some final initialization just before the command gets
+        dispatched. This is empty here but maybe useful for subclasses.
+        """
+
+    def construct_early_parser(self):
+        """
+        Create a parser that captures some of the early data we need to
+        be able to have a real parser and initialize the rest.
+        """
+        parser = argparse.ArgumentParser(add_help=False)
+        # Fake --help and --version
+        parser.add_argument("-h", "--help", action="store_const", const=None)
+        parser.add_argument("--version", action="store_const", const=None)
+        self.add_early_parser_arguments(parser)
+        # A catch-all net for everything else
+        parser.add_argument("rest", nargs="...")
+        return parser
+
+    def construct_parser(self):
+        parser = argparse.ArgumentParser(prog=self.get_exec_name())
+        parser.add_argument(
+            "--version", action="version", version=self.get_exec_version())
+        # Add all the things really parsed by the early parser so that it
+        # shows up in --help and bash tab completion.
+        self.add_early_parser_arguments(parser)
+        subparsers = parser.add_subparsers()
+        self.add_subcommands(subparsers)
+        # Enable argcomplete if it is available.
+        try:
+            import argcomplete
+        except ImportError:
+            pass
+        else:
+            argcomplete.autocomplete(parser)
+        return parser
+
+    def add_early_parser_arguments(self, parser):
+        # Since we need a CheckBox instance to create the main argument parser
+        # and we need to be able to specify where Checkbox is, we parse that
+        # option alone before parsing everything else
+        # TODO: rename this to -p | --provider
+        parser.add_argument(
+            '-c', '--checkbox',
+            action='store',
+            # TODO: have some public API for this, pretty please
+            choices=['src', 'deb', 'auto', 'stub', 'ihv'],
+            # None is a special value that means 'use whatever configured'
+            default=None,
+            help="where to find the installation of CheckBox.")
+        group = parser.add_argument_group(
+            title="logging and debugging")
+        # Add the --log-level argument
+        group.add_argument(
+            "-l", "--log-level",
+            action="store",
+            choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
+            default=None,
+            help=argparse.SUPPRESS)
+        # Add the --verbose argument
+        group.add_argument(
+            "-v", "--verbose",
+            dest="log_level",
+            action="store_const",
+            const="INFO",
+            help="be more verbose (same as --log-level=INFO)")
+        # Add the --debug flag
+        group.add_argument(
+            "-D", "--debug",
+            dest="log_level",
+            action="store_const",
+            const="DEBUG",
+            help="enable DEBUG messages on the root logger")
+        # Add the --debug flag
+        group.add_argument(
+            "-C", "--debug-console",
+            action="store_true",
+            help="display DEBUG messages in the console")
+        # Add the --trace flag
+        group.add_argument(
+            "-T", "--trace",
+            metavar="LOGGER",
+            action="append",
+            default=[],
+            help=("enable DEBUG messages on the specified logger "
+                  "(can be used multiple times)"))
+        # Add the --pdb flag
+        group.add_argument(
+            "-P", "--pdb",
+            action="store_true",
+            default=False,
+            help="jump into pdb (python debugger) when a command crashes")
+        # Add the --debug-interrupt flag
+        group.add_argument(
+            "-I", "--debug-interrupt",
+            action="store_true",
+            default=False,
+            help="crash on SIGINT/KeyboardInterrupt, useful with --pdb")
+
+    def dispatch_command(self, ns):
+        # Argh the horror!
+        #
+        # Since CPython revision cab204a79e09 (landed for python3.3)
+        # http://hg.python.org/cpython/diff/cab204a79e09/Lib/argparse.py
+        # the argparse module behaves differently than it did in python3.2
+        #
+        # In practical terms subparsers are now optional in 3.3 so all of the
+        # commands are no longer required parameters.
+        #
+        # To compensate, on python3.3 and beyond, when the user just runs
+        # plainbox without specifying the command, we manually, explicitly do
+        # what python3.2 did: call parser.error(_('too few arguments'))
+        if (sys.version_info[:2] >= (3, 3)
+                and getattr(ns, "command", None) is None):
+            self._parser.error(argparse._("too few arguments"))
+        else:
+            return ns.command.invoked(ns)
+
+    def dispatch_and_catch_exceptions(self, ns):
+        try:
+            return self.dispatch_command(ns)
+        except SystemExit:
+            # Don't let SystemExit be caught in the logic below, we really
+            # just want to exit when that gets thrown.
+            logger.debug("caught SystemExit, exiting")
+            # We may want to raise SystemExit as it can carry a status code
+            # along and we cannot just consume that.
+            raise
+        except BaseException as exc:
+            logger.debug("caught %r, deciding on what to do next", exc)
+            # For all other exceptions (and I mean all), do a few checks
+            # and perform actions depending on the command line arguments
+            # By default we want to re-raise the exception
+            action = 'raise'
+            # We want to ignore IOErrors that are really EPIPE
+            if isinstance(exc, IOError):
+                if exc.errno == errno.EPIPE:
+                    action = 'ignore'
+            # We want to ignore KeyboardInterrupt unless --debug-interrupt
+            # was passed on command line
+            elif isinstance(exc, KeyboardInterrupt):
+                if ns.debug_interrupt:
+                    action = 'debug'
+                else:
+                    action = 'ignore'
+            else:
+                # For all other exceptions, debug if requested
+                if ns.pdb:
+                    action = 'debug'
+            logger.debug("action for exception %r is %s", exc, action)
+            if action == 'ignore':
+                return 0
+            elif action == 'raise':
+                logging.getLogger("plainbox.crashes").fatal(
+                    "Executable %r invoked with %r has crashed",
+                    self.get_exec_name(), ns, exc_info=1)
+                raise
+            elif action == 'debug':
+                logger.error("caught runaway exception: %r", exc)
+                logger.error("starting debugger...")
+                pdb.post_mortem()
+                return 1

=== modified file 'plainbox/plainbox/impl/commands/analyze.py'
--- plainbox/plainbox/impl/commands/analyze.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/commands/analyze.py	2013-09-13 17:12:45 +0000
@@ -31,7 +31,7 @@
 from plainbox.impl.commands import PlainBoxCommand
 from plainbox.impl.commands.checkbox import CheckBoxCommandMixIn
 from plainbox.impl.commands.checkbox import CheckBoxInvocationMixIn
-from plainbox.impl.session import SessionState
+from plainbox.impl.session import SessionStateLegacyAPI as SessionState
 from plainbox.impl.runner import JobRunner
 
 
@@ -40,8 +40,8 @@
 
 class AnalyzeInvocation(CheckBoxInvocationMixIn):
 
-    def __init__(self, checkbox, ns):
-        super(AnalyzeInvocation, self).__init__(checkbox)
+    def __init__(self, provider, ns):
+        super(AnalyzeInvocation, self).__init__(provider)
         self.ns = ns
         self.job_list = self.get_job_list(ns)
         self.desired_job_list = self._get_matching_job_list(ns, self.job_list)
@@ -64,7 +64,7 @@
         with self.session.open():
             runner = JobRunner(
                 self.session.session_dir, self.session.jobs_io_log_dir,
-                command_io_delegate=self, outcome_callback=None)
+                command_io_delegate=self, interaction_callback=None)
             again = True
             while again:
                 for job in self.session.run_list:
@@ -127,11 +127,11 @@
     Implementation of ``$ plainbox dev analyze``
     """
 
-    def __init__(self, checkbox):
-        self.checkbox = checkbox
+    def __init__(self, provider):
+        self.provider = provider
 
     def invoked(self, ns):
-        return AnalyzeInvocation(self.checkbox, ns).run()
+        return AnalyzeInvocation(self.provider, ns).run()
 
     def register_parser(self, subparsers):
         parser = subparsers.add_parser(

=== modified file 'plainbox/plainbox/impl/commands/checkbox.py'
--- plainbox/plainbox/impl/commands/checkbox.py	2013-05-10 16:49:14 +0000
+++ plainbox/plainbox/impl/commands/checkbox.py	2013-09-13 17:12:45 +0000
@@ -32,14 +32,14 @@
 
 class CheckBoxInvocationMixIn:
 
-    def __init__(self, checkbox):
-        self.checkbox = checkbox
+    def __init__(self, provider):
+        self.provider = provider
 
     def get_job_list(self, ns):
         """
         Load and return a list of JobDefinition instances
         """
-        return self.checkbox.get_builtin_jobs()
+        return self.provider.get_builtin_jobs()
 
     def _get_matching_job_list(self, ns, job_list):
         # Find jobs that matched patterns
@@ -47,27 +47,29 @@
         # Pre-seed the include pattern list with data read from
         # the whitelist file.
         if ns.whitelist:
-            ns.include_pattern_list.extend([
-                pattern.strip()
-                for pattern in ns.whitelist.readlines()])
+            for whitelist in ns.whitelist:
+                ns.include_pattern_list.extend([
+                    pattern.strip()
+                    for pattern in whitelist.readlines()])
         # Decide which of the known jobs to include
-        for job in job_list:
-            # Reject all jobs that match any of the exclude
-            # patterns, matching strictly from the start to
-            # the end of the line.
+        if ns.exclude_pattern_list:
             for pattern in ns.exclude_pattern_list:
+                # Reject all jobs that match any of the exclude
+                # patterns, matching strictly from the start to
+                # the end of the line.
                 regexp_pattern = r"^{pattern}$".format(pattern=pattern)
-                if re.match(regexp_pattern, job.name):
-                    break
-            else:
-                # Then accept (include) all job that matches
+                for job in job_list:
+                    if re.match(regexp_pattern, job.name):
+                        job_list.remove(job)
+        if ns.include_pattern_list:
+            for pattern in ns.include_pattern_list:
+                # Accept (include) all job that matches
                 # any of include patterns, matching strictly
                 # from the start to the end of the line.
-                for pattern in ns.include_pattern_list:
-                    regexp_pattern = r"^{pattern}$".format(pattern=pattern)
+                regexp_pattern = r"^{pattern}$".format(pattern=pattern)
+                for job in job_list:
                     if re.match(regexp_pattern, job.name):
                         matching_job_list.append(job)
-                        break
         return matching_job_list
 
 
@@ -95,6 +97,7 @@
         # TODO: Find a way to handle the encoding of the file
         group.add_argument(
             '-w', '--whitelist',
+            action="append",
             metavar="WHITELIST",
             type=FileType("rt"),
             help="Load whitelist containing run patterns")

=== modified file 'plainbox/plainbox/impl/commands/dev.py'
--- plainbox/plainbox/impl/commands/dev.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/commands/dev.py	2013-09-13 17:12:45 +0000
@@ -45,8 +45,8 @@
     Command hub for various development commands.
     """
 
-    def __init__(self, checkbox, config):
-        self.checkbox = checkbox
+    def __init__(self, provider, config):
+        self.provider = provider
         self.config = config
 
     def invoked(self, ns):
@@ -56,9 +56,9 @@
         parser = subparsers.add_parser(
             "dev", help="development commands")
         subdev = parser.add_subparsers()
-        ScriptCommand(self.checkbox, self.config).register_parser(subdev)
-        SpecialCommand(self.checkbox).register_parser(subdev)
-        AnalyzeCommand(self.checkbox).register_parser(subdev)
+        ScriptCommand(self.provider, self.config).register_parser(subdev)
+        SpecialCommand(self.provider).register_parser(subdev)
+        AnalyzeCommand(self.provider).register_parser(subdev)
         ParseCommand().register_parser(subdev)
         CrashCommand().register_parser(subdev)
         LogTestCommand().register_parser(subdev)

=== modified file 'plainbox/plainbox/impl/commands/run.py'
--- plainbox/plainbox/impl/commands/run.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/commands/run.py	2013-09-13 17:12:45 +0000
@@ -35,17 +35,19 @@
 
 from requests.exceptions import ConnectionError, InvalidSchema, HTTPError
 
+from plainbox.abc import IJobResult
+from plainbox.impl.providers.checkbox import CheckBoxDebProvider
 from plainbox.impl.commands import PlainBoxCommand
 from plainbox.impl.commands.checkbox import CheckBoxCommandMixIn
 from plainbox.impl.commands.checkbox import CheckBoxInvocationMixIn
 from plainbox.impl.depmgr import DependencyDuplicateError
 from plainbox.impl.exporter import ByteStringStreamTranslator
 from plainbox.impl.exporter import get_all_exporters
-from plainbox.impl.result import JobResult
+from plainbox.impl.result import DiskJobResult, MemoryJobResult
 from plainbox.impl.runner import JobRunner
 from plainbox.impl.runner import authenticate_warmup
 from plainbox.impl.runner import slugify
-from plainbox.impl.session import SessionState
+from plainbox.impl.session import SessionStateLegacyAPI as SessionState
 from plainbox.impl.transport import get_all_transports
 
 
@@ -54,8 +56,8 @@
 
 class RunInvocation(CheckBoxInvocationMixIn):
 
-    def __init__(self, checkbox, ns):
-        super(RunInvocation, self).__init__(checkbox)
+    def __init__(self, provider, ns):
+        super(RunInvocation, self).__init__(provider)
         self.ns = ns
 
     def run(self):
@@ -110,25 +112,46 @@
         except ValueError as exc:
             raise SystemExit(str(exc))
 
-    def ask_for_resume(self, prompt=None, allowed=None):
-        # FIXME: Add support/callbacks for a GUI
-        if prompt is None:
-            prompt = "Do you want to resume the previous session [Y/n]? "
-        if allowed is None:
-            allowed = ('', 'y', 'Y', 'n', 'N')
+    def ask_for_resume(self):
+        return self.ask_user(
+            "Do you want to resume the previous session?", ('y', 'n')
+        ).lower() == "y"
+
+    def ask_for_resume_action(self):
+        return self.ask_user(
+            "What do you want to do with that job?", ('skip', 'fail', 'run'))
+
+    def ask_user(self, prompt, allowed):
         answer = None
         while answer not in allowed:
-            answer = input(prompt)
-        return False if answer in ('n', 'N') else True
+            answer = input("{} [{}] ".format(prompt, ", ".join(allowed)))
+        return answer
+
+    def _maybe_skip_last_job_after_resume(self, session):
+        last_job = session.metadata.running_job_name
+        if last_job is None:
+            return
+        print("We have previously tried to execute {}".format(last_job))
+        action = self.ask_for_resume_action()
+        if action == 'skip':
+            result = MemoryJobResult({
+                'outcome': 'skip',
+                'comment': "Skipped after resuming execution"
+            })
+        elif action == 'fail':
+            result = MemoryJobResult({
+                'outcome': 'fail',
+                'comment': "Failed after resuming execution"
+            })
+        elif action == 'run':
+            result = None
+        if result:
+            session.update_job_result(
+                session.job_state_map[last_job].job, result)
+            session.metadata.running_job_name = None
+            session.persistent_save()
 
     def _run_jobs(self, ns, job_list, exporter, transport=None):
-        # Ask the password before anything else in order to run jobs requiring
-        # privileges
-        if self.checkbox._mode == 'deb':
-            print("[ Authentication ]".center(80, '='))
-            return_code = authenticate_warmup()
-            if return_code:
-                raise SystemExit(return_code)
         # Compute the run list, this can give us notification about problems in
         # the selected jobs. Currently we just display each problem
         matching_job_list = self._get_matching_job_list(ns, job_list)
@@ -150,18 +173,28 @@
             if session.previous_session_file():
                 if self.ask_for_resume():
                     session.resume()
+                    self._maybe_skip_last_job_after_resume(session)
                 else:
                     session.clean()
+            session.metadata.title = " ".join(sys.argv)
+            session.persistent_save()
             self._update_desired_job_list(session, matching_job_list)
+            # Ask the password before anything else in order to run jobs
+            # requiring privileges
+            if self._auth_warmup_needed(session):
+                print("[ Authentication ]".center(80, '='))
+                return_code = authenticate_warmup()
+                if return_code:
+                    raise SystemExit(return_code)
             if (sys.stdin.isatty() and sys.stdout.isatty() and not
                     ns.not_interactive):
-                outcome_callback = self.ask_for_outcome
+                interaction_callback = self._interaction_callback
             else:
-                outcome_callback = None
+                interaction_callback = None
             runner = JobRunner(
                 session.session_dir,
                 session.jobs_io_log_dir,
-                outcome_callback=outcome_callback,
+                interaction_callback=interaction_callback,
                 dry_run=ns.dry_run
             )
             self._run_jobs_with_session(ns, session, runner)
@@ -189,6 +222,18 @@
         # FIXME: sensible return value
         return 0
 
+    def _auth_warmup_needed(self, session):
+        # Don't use authentication warm-up in modes other than 'deb' as it
+        # makes no sense to do so.
+        if not isinstance(self.provider, CheckBoxDebProvider):
+            return False
+        # Don't use authentication warm-up if none of the jobs on the run list
+        # requires it.
+        if all(job.user is None for job in session.run_list):
+            return False
+        # Otherwise, do pre-authentication
+        return True
+
     def _save_results(self, output_file, input_stream):
         if output_file is sys.stdout:
             print("[ Results ]".center(80, '='))
@@ -203,18 +248,32 @@
         if output_file is not sys.stdout:
             output_file.close()
 
-    def ask_for_outcome(self, prompt=None, allowed=None):
+    def _interaction_callback(self, runner, job, config, prompt=None,
+                             allowed_outcome=None):
+        result = {}
         if prompt is None:
-            prompt = "what is the outcome? "
-        if allowed is None:
-            allowed = (JobResult.OUTCOME_PASS,
-                       JobResult.OUTCOME_FAIL,
-                       JobResult.OUTCOME_SKIP)
-        answer = None
-        while answer not in allowed:
-            print("Allowed answers are: {}".format(", ".join(allowed)))
-            answer = input(prompt)
-        return answer
+            prompt = "Select an outcome or an action: "
+        if allowed_outcome is None:
+            allowed_outcome = [IJobResult.OUTCOME_PASS,
+                               IJobResult.OUTCOME_FAIL,
+                               IJobResult.OUTCOME_SKIP]
+        allowed_actions = ['comments']
+        if job.command:
+            allowed_actions.append('test')
+        result['outcome'] = IJobResult.OUTCOME_UNDECIDED
+        while result['outcome'] not in allowed_outcome:
+            print("Allowed answers are: {}".format(", ".join(allowed_outcome +
+                                                             allowed_actions)))
+            choice = input(prompt)
+            if choice in allowed_outcome:
+                result['outcome'] = choice
+                break
+            elif choice == 'test':
+                (result['return_code'],
+                 result['io_log_filename']) = runner._run_command(job, config)
+            elif choice == 'comments':
+                result['comments'] = input('Please enter your comments:\n')
+        return DiskJobResult(result)
 
     def _update_desired_job_list(self, session, desired_job_list):
         problem_list = session.update_desired_job_list(desired_job_list)
@@ -224,6 +283,18 @@
             for problem in problem_list:
                 print(" * {}".format(problem))
             print("Problematic jobs will not be considered")
+        (estimated_duration_auto,
+         estimated_duration_manual) = session.get_estimated_duration()
+        if estimated_duration_auto:
+            print("Estimated duration is {:.2f} for automated jobs.".format(
+                  estimated_duration_auto))
+        else:
+            print("Estimated duration cannot be determined for automated jobs.")
+        if estimated_duration_manual:
+            print("Estimated duration is {:.2f} for manual jobs.".format(
+                  estimated_duration_manual))
+        else:
+            print("Estimated duration cannot be determined for manual jobs.")
 
     def _run_jobs_with_session(self, ns, session, runner):
         # TODO: run all resource jobs concurrently with multiprocessing
@@ -272,13 +343,16 @@
         if job_state.can_start():
             print("Running... (output in {}.*)".format(
                 join(session.jobs_io_log_dir, slugify(job.name))))
+            session.metadata.running_job_name = job.name
+            session.persistent_save()
             job_result = runner.run_job(job)
+            session.metadata.running_job_name = None
+            session.persistent_save()
             print("Outcome: {}".format(job_result.outcome))
             print("Comments: {}".format(job_result.comments))
         else:
-            job_result = JobResult({
-                'job': job,
-                'outcome': JobResult.OUTCOME_NOT_SUPPORTED,
+            job_result = MemoryJobResult({
+                'outcome': IJobResult.OUTCOME_NOT_SUPPORTED,
                 'comments': job_state.get_readiness_description()
             })
         if job_result is not None:
@@ -287,11 +361,11 @@
 
 class RunCommand(PlainBoxCommand, CheckBoxCommandMixIn):
 
-    def __init__(self, checkbox):
-        self.checkbox = checkbox
+    def __init__(self, provider):
+        self.provider = provider
 
     def invoked(self, ns):
-        return RunInvocation(self.checkbox, ns).run()
+        return RunInvocation(self.provider, ns).run()
 
     def register_parser(self, subparsers):
         parser = subparsers.add_parser("run", help="run a test job")

=== modified file 'plainbox/plainbox/impl/commands/script.py'
--- plainbox/plainbox/impl/commands/script.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/commands/script.py	2013-09-13 17:12:45 +0000
@@ -48,8 +48,8 @@
     the command is to be invoked.
     """
 
-    def __init__(self, checkbox, config, job_name):
-        self.checkbox = checkbox
+    def __init__(self, provider, config, job_name):
+        self.provider = provider
         self.config = config
         self.job_name = job_name
 
@@ -67,8 +67,10 @@
             bait_dir = os.path.join(scratch, 'files-created-in-current-dir')
             os.mkdir(bait_dir)
             with TestCwd(bait_dir):
-                return_code, fjson = runner._run_command(job, self.config)
+                return_code, record_path = runner._run_command(
+                    job, self.config)
             self._display_side_effects(scratch)
+            self._display_script_outcome(job, return_code)
         return return_code
 
     def _display_file(self, pathname, origin):
@@ -85,9 +87,13 @@
                 self._display_file(
                     os.path.join(dirpath, filename), scratch)
 
+    def _display_script_outcome(self, job, return_code):
+        print(job.name, "returned", return_code)
+        print("command:", job.command)
+
     def _get_job(self):
         job_list = get_matching_job_list(
-            self.checkbox.get_builtin_jobs(),
+            self.provider.get_builtin_jobs(),
             NameJobQualifier(self.job_name))
         if len(job_list) == 0:
             return None
@@ -101,12 +107,12 @@
     unconditionally.
     """
 
-    def __init__(self, checkbox, config):
-        self.checkbox = checkbox
+    def __init__(self, provider, config):
+        self.provider = provider
         self.config = config
 
     def invoked(self, ns):
-        return ScriptInvocation(self.checkbox, self.config, ns.job_name).run()
+        return ScriptInvocation(self.provider, self.config, ns.job_name).run()
 
     def register_parser(self, subparsers):
         parser = subparsers.add_parser(

=== added file 'plainbox/plainbox/impl/commands/service.py'
--- plainbox/plainbox/impl/commands/service.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/commands/service.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,132 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.commands.service` -- service sub-command
+============================================================
+
+"""
+
+import logging
+import os
+
+from dbus import StarterBus, SessionBus
+from dbus.mainloop.glib import DBusGMainLoop, threads_init
+from dbus.service import BusName
+from gi.repository import GObject
+
+from plainbox.impl.commands import PlainBoxCommand
+from plainbox.impl.highlevel import Service
+from plainbox.impl.service import ServiceWrapper
+
+
+logger = logging.getLogger("plainbox.commands.service")
+
+
+def connect_to_session_bus():
+    """
+    Connect to the session bus properly.
+
+    Returns a tuple (session_bus, loop) where loop is a GObject.MainLoop
+    instance. The loop is there so that you can listen to signals.
+    """
+    # We'll need an event loop to observe signals. We will need the instance
+    # later below so let's keep it. Note that we're not passing it directly
+    # below as DBus needs specific API. The DBusGMainLoop class that we
+    # instantiate and pass is going to work with this instance transparently.
+    #
+    # NOTE: DBus tutorial suggests that we should create the loop _before_
+    # connecting to the bus.
+    logger.debug("Setting up glib-based event loop")
+    # Make sure gobject threads don't crash
+    GObject.threads_init()
+    threads_init()
+    loop = GObject.MainLoop()
+    # Let's get the system bus object.
+    logger.debug("Connecting to DBus session bus")
+    if os.getenv("DBUS_STARTER_ADDRESS"):
+        session_bus = StarterBus(mainloop=DBusGMainLoop())
+    else:
+        session_bus = SessionBus(mainloop=DBusGMainLoop())
+    return session_bus, loop
+
+
+class ServiceInvocation:
+
+    def __init__(self, provider, config, ns):
+        self.provider = provider
+        self.config = config
+        self.ns = ns
+
+    def run(self):
+        bus, loop = connect_to_session_bus()
+        logger.info("Setting up DBus objects...")
+        provider_list = [self.provider]
+        session_list = []  # TODO: load sessions
+        logger.debug("Constructing Service object")
+        service_obj = Service(provider_list, session_list)
+        logger.debug("Constructing ServiceWrapper")
+        service_wrp = ServiceWrapper(service_obj, on_exit=lambda: loop.quit())
+        logger.info("Publishing all objects on DBus")
+        service_wrp.publish_related_objects(bus)
+        logger.info("Publishing all managed objects (events should fire there)")
+        service_wrp.publish_managed_objects()
+        logger.debug("Attempting to claim bus name: %s", self.ns.bus_name)
+        bus_name = BusName(self.ns.bus_name, bus)
+        logger.info(
+            "PlainBox DBus service ready, claimed name: %s",
+            bus_name.get_name())
+        try:
+            loop.run()
+        except KeyboardInterrupt:
+            logger.warning((
+                "Main loop interrupted!"
+                " It is recommended to call the Exit() method on the"
+                " exported service object instead"))
+        finally:
+            logger.debug("Releasing %s", bus_name)
+            # XXX: ugly but that's how one can reliably release a bus name
+            del bus_name
+            # Remove objects from the bus
+            service_wrp.remove_from_connection()
+            logger.debug("Closing %s", bus)
+            bus.close()
+            logger.debug("Main loop terminated, exiting...")
+
+
+class ServiceCommand(PlainBoxCommand):
+    """
+    DBus service for PlainBox
+    """
+
+    # XXX: Maybe drop provider / config and handle them differently
+    def __init__(self, provider, config):
+        self.provider = provider
+        self.config = config
+
+    def invoked(self, ns):
+        return ServiceInvocation(self.provider, self.config, ns).run()
+
+    def register_parser(self, subparsers):
+        parser = subparsers.add_parser("service", help="spawn dbus service")
+        parser.add_argument(
+            '--bus-name', action="store",
+            default="com.canonical.certification.PlainBox1",
+            help="Use the specified DBus bus name")
+        parser.set_defaults(command=self)

=== modified file 'plainbox/plainbox/impl/commands/special.py'
--- plainbox/plainbox/impl/commands/special.py	2013-05-13 08:49:00 +0000
+++ plainbox/plainbox/impl/commands/special.py	2013-09-13 17:12:45 +0000
@@ -38,8 +38,8 @@
 
 class SpecialInvocation(CheckBoxInvocationMixIn):
 
-    def __init__(self, checkbox, ns):
-        super(SpecialInvocation, self).__init__(checkbox)
+    def __init__(self, provider, ns):
+        super(SpecialInvocation, self).__init__(provider)
         self.ns = ns
 
     def run(self):
@@ -124,11 +124,11 @@
     Implementation of ``$ plainbox special``
     """
 
-    def __init__(self, checkbox):
-        self.checkbox = checkbox
+    def __init__(self, provider):
+        self.provider = provider
 
     def invoked(self, ns):
-        return SpecialInvocation(self.checkbox, ns).run()
+        return SpecialInvocation(self.provider, ns).run()
 
     def register_parser(self, subparsers):
         parser = subparsers.add_parser(

=== modified file 'plainbox/plainbox/impl/commands/sru.py'
--- plainbox/plainbox/impl/commands/sru.py	2013-05-10 16:49:15 +0000
+++ plainbox/plainbox/impl/commands/sru.py	2013-09-13 17:12:45 +0000
@@ -28,21 +28,24 @@
 """
 import logging
 import os
+import sys
 import tempfile
 
 from requests.exceptions import ConnectionError, InvalidSchema, HTTPError
 
+from plainbox.impl.applogic import WhiteList
 from plainbox.impl.applogic import get_matching_job_list
 from plainbox.impl.applogic import run_job_if_possible
-from plainbox.impl.checkbox import WhiteList
 from plainbox.impl.commands import PlainBoxCommand
 from plainbox.impl.commands.check_config import CheckConfigInvocation
+from plainbox.impl.commands.checkbox import CheckBoxCommandMixIn
+from plainbox.impl.commands.checkbox import CheckBoxInvocationMixIn
 from plainbox.impl.config import ValidationError, Unset
 from plainbox.impl.depmgr import DependencyDuplicateError
 from plainbox.impl.exporter import ByteStringStreamTranslator
 from plainbox.impl.exporter.xml import XMLSessionStateExporter
 from plainbox.impl.runner import JobRunner
-from plainbox.impl.session import SessionState
+from plainbox.impl.session import SessionStateLegacyAPI as SessionState
 from plainbox.impl.transport.certification import CertificationTransport
 from plainbox.impl.transport.certification import InvalidSecureIDError
 
@@ -50,20 +53,26 @@
 logger = logging.getLogger("plainbox.commands.sru")
 
 
-class _SRUInvocation:
+class _SRUInvocation(CheckBoxInvocationMixIn):
     """
     Helper class instantiated to perform a particular invocation of the sru
     command. Unlike the SRU command itself, this class is instantiated each
     time.
     """
 
-    def __init__(self, checkbox, config, ns):
-        self.checkbox = checkbox
+    def __init__(self, provider, config, ns):
+        self.provider = provider
         self.config = config
         self.ns = ns
-        self.whitelist = WhiteList.from_file(os.path.join(
-            self.checkbox.whitelists_dir, "sru.whitelist"))
-        self.job_list = self.checkbox.get_builtin_jobs()
+        if self.ns.whitelist:
+            self.whitelist = WhiteList.from_file(self.ns.whitelist[0].name)
+        elif self.config.whitelist is not Unset:
+            self.whitelist = WhiteList.from_file(self.config.whitelist)
+        else:
+            self.whitelist = WhiteList.from_file(os.path.join(
+                self.provider.whitelists_dir, "sru.whitelist"))
+
+        self.job_list = self.provider.get_builtin_jobs()
         # XXX: maybe allow specifying system_id from command line?
         self.exporter = XMLSessionStateExporter(system_id=None)
         self.session = None
@@ -91,7 +100,7 @@
                 self.session.session_dir,
                 self.session.jobs_io_log_dir,
                 command_io_delegate=self,
-                outcome_callback=None,  # SRU runs are never interactive
+                interaction_callback=None,  # SRU runs are never interactive
                 dry_run=self.ns.dry_run
             )
             self._run_all_jobs()
@@ -178,9 +187,11 @@
 
     def _run_single_job(self, job):
         print("- {}:".format(job.name), end=' ')
+        sys.stdout.flush()
         job_state, job_result = run_job_if_possible(
             self.session, self.runner, self.config, job)
         print("{0}".format(job_result.outcome))
+        sys.stdout.flush()
         if job_result.comments is not None:
             print("comments: {0}".format(job_result.comments))
         if job_state.readiness_inhibitor_list:
@@ -190,7 +201,7 @@
         self.session.update_job_result(job, job_result)
 
 
-class SRUCommand(PlainBoxCommand):
+class SRUCommand(PlainBoxCommand, CheckBoxCommandMixIn):
     """
     Command for running Stable Release Update (SRU) tests.
 
@@ -204,8 +215,8 @@
     plainbox core on realistic workloads.
     """
 
-    def __init__(self, checkbox, config):
-        self.checkbox = checkbox
+    def __init__(self, provider, config):
+        self.provider = provider
         self.config = config
 
     def invoked(self, ns):
@@ -226,7 +237,7 @@
             retval = CheckConfigInvocation(self.config).run()
             if retval != 0:
                 return retval
-        return _SRUInvocation(self.checkbox, self.config, ns).run()
+        return _SRUInvocation(self.provider, self.config, ns).run()
 
     def register_parser(self, subparsers):
         parser = subparsers.add_parser(
@@ -262,6 +273,12 @@
             action='store',
             help=("POST the test report XML to this URL"
                   " (%(default)s)"))
+        group.add_argument(
+            '--staging',
+            dest='c3_url',
+            action='store_const',
+            const='https://certification.staging.canonical.com/submissions/submit/',
+            help='Override --destination to use the staging certification website')
         group = parser.add_argument_group(title="execution options")
         group.add_argument(
             '-n', '--dry-run',
@@ -269,3 +286,6 @@
             default=False,
             help=("Skip all usual jobs."
                   " Only local, resource and attachment jobs are started"))
+        # Call enhance_parser from CheckBoxCommandMixIn
+        self.enhance_parser(parser)
+

=== modified file 'plainbox/plainbox/impl/commands/test_dev.py'
--- plainbox/plainbox/impl/commands/test_dev.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/commands/test_dev.py	2013-09-13 17:12:45 +0000
@@ -39,17 +39,17 @@
     def setUp(self):
         self.parser = argparse.ArgumentParser(prog='test')
         self.subparsers = self.parser.add_subparsers()
-        self.checkbox = mock.Mock()
+        self.provider = mock.Mock()
         self.config = mock.Mock()
         self.ns = mock.Mock()
 
     def test_init(self):
-        dev_cmd = DevCommand(self.checkbox, self.config)
-        self.assertIs(dev_cmd.checkbox, self.checkbox)
+        dev_cmd = DevCommand(self.provider, self.config)
+        self.assertIs(dev_cmd.provider, self.provider)
         self.assertIs(dev_cmd.config, self.config)
 
     def test_register_parser(self):
-        DevCommand(self.checkbox, self.config).register_parser(
+        DevCommand(self.provider, self.config).register_parser(
             self.subparsers)
         with TestIO() as io:
             self.parser.print_help()

=== modified file 'plainbox/plainbox/impl/commands/test_run.py'
--- plainbox/plainbox/impl/commands/test_run.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/commands/test_run.py	2013-09-13 17:12:45 +0000
@@ -29,11 +29,16 @@
 import shutil
 import tempfile
 
+from collections import OrderedDict
 from inspect import cleandoc
 from mock import patch
 from unittest import TestCase
 
 from plainbox.impl.box import main
+from plainbox.impl.exporter.json import JSONSessionStateExporter
+from plainbox.impl.exporter.rfc822 import RFC822SessionStateExporter
+from plainbox.impl.exporter.text import TextSessionStateExporter
+from plainbox.impl.exporter.xml import XMLSessionStateExporter
 from plainbox.testing_utils.io import TestIO
 
 
@@ -46,6 +51,12 @@
         self._sandbox = tempfile.mkdtemp()
         self._env = os.environ
         os.environ['XDG_CACHE_HOME'] = self._sandbox
+        self._exporters = OrderedDict([
+            ('json', JSONSessionStateExporter),
+            ('rfc822', RFC822SessionStateExporter),
+            ('text', TextSessionStateExporter),
+            ('xml', XMLSessionStateExporter),
+        ])
 
     def test_help(self):
         with TestIO(combined=True) as io:
@@ -107,12 +118,16 @@
             self.assertEqual(call.exception.args, (0,))
         expected1 = """
         ===============================[ Analyzing Jobs ]===============================
+        Estimated duration cannot be determined for automated jobs.
+        Estimated duration cannot be determined for manual jobs.
         ==============================[ Running All Jobs ]==============================
         ==================================[ Results ]===================================
         """
         expected2 = """
         ===============================[ Authentication ]===============================
         ===============================[ Analyzing Jobs ]===============================
+        Estimated duration cannot be determined for automated jobs.
+        Estimated duration cannot be determined for manual jobs.
         ==============================[ Running All Jobs ]==============================
         ==================================[ Results ]===================================
         """
@@ -123,7 +138,9 @@
     def test_output_format_list(self):
         with TestIO(combined=True) as io:
             with self.assertRaises(SystemExit) as call:
-                main(['run', '--output-format=?'])
+                with patch('plainbox.impl.commands.run.get_all_exporters') as mock_get_all_exporters:
+                    mock_get_all_exporters.return_value = self._exporters
+                    main(['run', '--output-format=?'])
             self.assertEqual(call.exception.args, (0,))
         expected = """
         Available output formats: json, rfc822, text, xml
@@ -133,7 +150,9 @@
     def test_output_option_list(self):
         with TestIO(combined=True) as io:
             with self.assertRaises(SystemExit) as call:
-                main(['run', '--output-option=?'])
+                with patch('plainbox.impl.commands.run.get_all_exporters') as mock_get_all_exporters:
+                    mock_get_all_exporters.return_value = self._exporters
+                    main(['run', '--output-option=?'])
             self.assertEqual(call.exception.args, (0,))
         expected = """
         Each format may support a different set of options

=== modified file 'plainbox/plainbox/impl/commands/test_script.py'
--- plainbox/plainbox/impl/commands/test_script.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/commands/test_script.py	2013-09-13 17:12:45 +0000
@@ -32,7 +32,7 @@
 
 from plainbox.impl.applogic import PlainBoxConfig
 from plainbox.impl.commands.script import ScriptInvocation, ScriptCommand
-from plainbox.impl.provider import DummyProvider1
+from plainbox.impl.providers.v1 import DummyProvider1
 from plainbox.impl.testing_utils import make_job
 from plainbox.testing_utils.io import TestIO
 
@@ -42,17 +42,17 @@
     def setUp(self):
         self.parser = argparse.ArgumentParser(prog='test')
         self.subparsers = self.parser.add_subparsers()
-        self.checkbox = mock.Mock()
+        self.provider = mock.Mock()
         self.config = mock.Mock()
         self.ns = mock.Mock()
 
     def test_init(self):
-        script_cmd = ScriptCommand(self.checkbox, self.config)
-        self.assertIs(script_cmd.checkbox, self.checkbox)
+        script_cmd = ScriptCommand(self.provider, self.config)
+        self.assertIs(script_cmd.provider, self.provider)
         self.assertIs(script_cmd.config, self.config)
 
     def test_register_parser(self):
-        ScriptCommand(self.checkbox, self.config).register_parser(
+        ScriptCommand(self.provider, self.config).register_parser(
             self.subparsers)
         with TestIO() as io:
             self.parser.print_help()
@@ -75,26 +75,26 @@
 
     @mock.patch("plainbox.impl.commands.script.ScriptInvocation")
     def test_invoked(self, patched_ScriptInvocation):
-        retval = ScriptCommand(self.checkbox, self.config).invoked(self.ns)
+        retval = ScriptCommand(self.provider, self.config).invoked(self.ns)
         patched_ScriptInvocation.assert_called_once_with(
-            self.checkbox, self.config, self.ns.job_name)
+            self.provider, self.config, self.ns.job_name)
         self.assertEqual(
             retval, patched_ScriptInvocation(
-                self.checkbox, self.config,
+                self.provider, self.config,
                 self.ns.job_name).run.return_value)
 
 
 class ScriptInvocationTests(TestCase):
 
     def setUp(self):
-        self.checkbox = mock.Mock()
+        self.provider = mock.Mock()
         self.config = PlainBoxConfig()
         self.job_name = mock.Mock()
 
     def test_init(self):
         script_inv = ScriptInvocation(
-            self.checkbox, self.config, self.job_name)
-        self.assertIs(script_inv.checkbox, self.checkbox)
+            self.provider, self.config, self.job_name)
+        self.assertIs(script_inv.provider, self.provider)
         self.assertIs(script_inv.config, self.config)
         self.assertIs(script_inv.job_name, self.job_name)
 
@@ -124,22 +124,27 @@
         self.assertEqual(retval, 125)
 
     def test_job_with_command(self):
+        dummy_name = 'foo'
+        dummy_command = 'echo ok'
         provider = DummyProvider1([
-            make_job('foo', command='echo ok')])
-        script_inv = ScriptInvocation(provider, self.config, 'foo')
+            make_job(dummy_name, command=dummy_command)])
+        script_inv = ScriptInvocation(provider, self.config, dummy_name)
         with TestIO() as io:
             retval = script_inv.run()
         self.assertEqual(
             io.stdout, cleandoc(
                 """
                 (job foo, <stdout:00001>) ok
-                """) + '\n')
+                """) + '\n' + "{} returned 0\n".format(dummy_name) +
+                "command: {}\n".format(dummy_command))
         self.assertEqual(retval, 0)
 
     def test_job_with_command_making_files(self):
+        dummy_name = 'foo'
+        dummy_command = 'echo ok > file'
         provider = DummyProvider1([
-            make_job('foo', command='echo ok > file')])
-        script_inv = ScriptInvocation(provider, self.config, 'foo')
+            make_job(dummy_name, command=dummy_command)])
+        script_inv = ScriptInvocation(provider, self.config, dummy_name)
         with TestIO() as io:
             retval = script_inv.run()
         self.maxDiff = None
@@ -148,5 +153,6 @@
                 """
                 Leftover file detected: 'files-created-in-current-dir/file':
                   files-created-in-current-dir/file:1: ok
-                """) + '\n')
+                """) + '\n' + "{} returned 0\n".format(dummy_name) +
+                "command: {}\n".format(dummy_command))
         self.assertEqual(retval, 0)

=== modified file 'plainbox/plainbox/impl/commands/test_sru.py'
--- plainbox/plainbox/impl/commands/test_sru.py	2013-04-24 17:50:58 +0000
+++ plainbox/plainbox/impl/commands/test_sru.py	2013-09-13 17:12:45 +0000
@@ -41,7 +41,8 @@
         self.maxDiff = None
         expected = """
         usage: plainbox sru [-h] [--check-config] --secure-id SECURE-ID
-                            [--fallback FILE] [--destination URL] [-n]
+                            [--fallback FILE] [--destination URL] [--staging] [-n]
+                            [-i PATTERN] [-x PATTERN] [-w WHITELIST]
 
         optional arguments:
           -h, --help            show this help message and exit
@@ -55,9 +56,21 @@
                                 (unset)
           --destination URL     POST the test report XML to this URL (https://certific
                                 ation.canonical.com/submissions/submit/)
+          --staging             Override --destination to use the staging
+                                certification website
 
         execution options:
           -n, --dry-run         Skip all usual jobs. Only local, resource and
                                 attachment jobs are started
+
+        job definition options:
+          -i PATTERN, --include-pattern PATTERN
+                                Run jobs matching the given regular expression.
+                                Matches from the start to the end of the line.
+          -x PATTERN, --exclude-pattern PATTERN
+                                Do not run jobs matching the given regular expression.
+                                Matches from the start to the end of the line.
+          -w WHITELIST, --whitelist WHITELIST
+                                Load whitelist containing run patterns
         """
         self.assertEqual(io.combined, cleandoc(expected) + "\n")

=== modified file 'plainbox/plainbox/impl/config.py'
--- plainbox/plainbox/impl/config.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/config.py	2013-09-13 17:12:45 +0000
@@ -543,3 +543,16 @@
     def __call__(self, variable, new_value):
         if not self.pattern.match(new_value):
             return "does not match pattern: {!r}".format(self.pattern_text)
+
+
+class ChoiceValidator(IValidator):
+    """
+    A validator ensuring that values are in a given set
+    """
+
+    def __init__(self, choice_list):
+        self.choice_list = choice_list
+
+    def __call__(self, variable, new_value):
+        if new_value not in self.choice_list:
+            return "must be one of {}".format(", ".join(self.choice_list))

=== added directory 'plainbox/plainbox/impl/dbus'
=== added file 'plainbox/plainbox/impl/dbus/__init__.py'
--- plainbox/plainbox/impl/dbus/__init__.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/dbus/__init__.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,47 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.dbus` -- DBus support code for PlainBox
+===========================================================
+"""
+
+__all__ = [
+    'service',
+    'exceptions',
+    'Signature',
+    'Struct',
+    'types',
+    'INTROSPECTABLE_IFACE',
+    'PEER_IFACE',
+    'PROPERTIES_IFACE',
+    'OBJECT_MANAGER_IFACE',
+]
+
+from dbus import INTROSPECTABLE_IFACE
+from dbus import PEER_IFACE
+from dbus import PROPERTIES_IFACE
+from dbus import Signature
+from dbus import Struct
+from dbus import exceptions
+from dbus import types
+
+OBJECT_MANAGER_IFACE = "org.freedesktop.DBus.ObjectManager"
+
+from plainbox.impl.dbus import service

=== added file 'plainbox/plainbox/impl/dbus/decorators.py'
--- plainbox/plainbox/impl/dbus/decorators.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/dbus/decorators.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,351 @@
+"""Service-side D-Bus decorators."""
+
+# Copyright (C) 2003, 2004, 2005, 2006 Red Hat Inc. <http://www.redhat.com/>
+# Copyright (C) 2003 David Zeuthen
+# Copyright (C) 2004 Rob Taylor
+# Copyright (C) 2005, 2006 Collabora Ltd. <http://www.collabora.co.uk/>
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use, copy,
+# modify, merge, publish, distribute, sublicense, and/or sell copies
+# of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+__all__ = ('method', 'signal')
+__docformat__ = 'restructuredtext'
+
+import inspect
+
+from dbus import validate_interface_name, Signature, validate_member_name
+from dbus.lowlevel import SignalMessage
+from dbus.exceptions import DBusException
+from dbus._compat import is_py2
+
+
+def method(dbus_interface, in_signature=None, out_signature=None,
+           async_callbacks=None,
+           sender_keyword=None, path_keyword=None, destination_keyword=None,
+           message_keyword=None, connection_keyword=None,
+           byte_arrays=False,
+           rel_path_keyword=None, **kwargs):
+    """Factory for decorators used to mark methods of a `dbus.service.Object`
+    to be exported on the D-Bus.
+
+    The decorated method will be exported over D-Bus as the method of the
+    same name on the given D-Bus interface.
+
+    :Parameters:
+        `dbus_interface` : str
+            Name of a D-Bus interface
+        `in_signature` : str or None
+            If not None, the signature of the method parameters in the usual
+            D-Bus notation
+        `out_signature` : str or None
+            If not None, the signature of the return value in the usual
+            D-Bus notation
+        `async_callbacks` : tuple containing (str,str), or None
+            If None (default) the decorated method is expected to return
+            values matching the `out_signature` as usual, or raise
+            an exception on error. If not None, the following applies:
+
+            `async_callbacks` contains the names of two keyword arguments to
+            the decorated function, which will be used to provide a success
+            callback and an error callback (in that order).
+
+            When the decorated method is called via the D-Bus, its normal
+            return value will be ignored; instead, a pair of callbacks are
+            passed as keyword arguments, and the decorated method is
+            expected to arrange for one of them to be called.
+
+            On success the success callback must be called, passing the
+            results of this method as positional parameters in the format
+            given by the `out_signature`.
+
+            On error the decorated method may either raise an exception
+            before it returns, or arrange for the error callback to be
+            called with an Exception instance as parameter.
+
+        `sender_keyword` : str or None
+            If not None, contains the name of a keyword argument to the
+            decorated function, conventionally ``'sender'``. When the
+            method is called, the sender's unique name will be passed as
+            this keyword argument.
+
+        `path_keyword` : str or None
+            If not None (the default), the decorated method will receive
+            the destination object path as a keyword argument with this
+            name. Normally you already know the object path, but in the
+            case of "fallback paths" you'll usually want to use the object
+            path in the method's implementation.
+
+            For fallback objects, `rel_path_keyword` (new in 0.82.2) is
+            likely to be more useful.
+
+            :Since: 0.80.0?
+
+        `rel_path_keyword` : str or None
+            If not None (the default), the decorated method will receive
+            the destination object path, relative to the path at which the
+            object was exported, as a keyword argument with this
+            name. For non-fallback objects the relative path will always be
+            '/'.
+
+            :Since: 0.82.2
+
+        `destination_keyword` : str or None
+            If not None (the default), the decorated method will receive
+            the destination bus name as a keyword argument with this name.
+            Included for completeness - you shouldn't need this.
+
+            :Since: 0.80.0?
+
+        `message_keyword` : str or None
+            If not None (the default), the decorated method will receive
+            the `dbus.lowlevel.MethodCallMessage` as a keyword argument
+            with this name.
+
+            :Since: 0.80.0?
+
+        `connection_keyword` : str or None
+            If not None (the default), the decorated method will receive
+            the `dbus.connection.Connection` as a keyword argument
+            with this name. This is generally only useful for objects
+            that are available on more than one connection.
+
+            :Since: 0.82.0
+
+        `utf8_strings` : bool
+            If False (default), D-Bus strings are passed to the decorated
+            method as objects of class dbus.String, a unicode subclass.
+
+            If True, D-Bus strings are passed to the decorated method
+            as objects of class dbus.UTF8String, a str subclass guaranteed
+            to be encoded in UTF-8.
+
+            This option does not affect object-paths and signatures, which
+            are always 8-bit strings (str subclass) encoded in ASCII.
+
+            :Since: 0.80.0
+
+        `byte_arrays` : bool
+            If False (default), a byte array will be passed to the decorated
+            method as an `Array` (a list subclass) of `Byte` objects.
+
+            If True, a byte array will be passed to the decorated method as
+            a `ByteArray`, a str subclass. This is usually what you want,
+            but is switched off by default to keep dbus-python's API
+            consistent.
+
+            :Since: 0.80.0
+    """
+    validate_interface_name(dbus_interface)
+
+    def decorator(func):
+        # If the function is decorated and uses @functools.wrapper then use the
+        # __wrapped__ attribute to look at the original function signature.
+        #
+        # This allows us to see past the generic *args, **kwargs seen on most decorators.
+        if hasattr(func, '__wrapped__'):
+            args = inspect.getfullargspec(func.__wrapped__)[0]
+        else:
+            args = inspect.getfullargspec(func)[0]
+        # Drop the implicit 'self' parameter; only arguments visible to
+        # remote D-Bus callers should remain in 'args'.
+        args.pop(0)
+        if async_callbacks:
+            if type(async_callbacks) != tuple:
+                raise TypeError('async_callbacks must be a tuple of (keyword for return callback, keyword for error callback)')
+            if len(async_callbacks) != 2:
+                raise ValueError('async_callbacks must be a tuple of (keyword for return callback, keyword for error callback)')
+            args.remove(async_callbacks[0])
+            args.remove(async_callbacks[1])
+
+        # Remove every *_keyword argument from the reflected argument list:
+        # these are injected by dbus-python at dispatch time, not supplied
+        # by the remote caller, so they must not count against in_signature.
+        if sender_keyword:
+            args.remove(sender_keyword)
+        if rel_path_keyword:
+            args.remove(rel_path_keyword)
+        if path_keyword:
+            args.remove(path_keyword)
+        if destination_keyword:
+            args.remove(destination_keyword)
+        if message_keyword:
+            args.remove(message_keyword)
+        if connection_keyword:
+            args.remove(connection_keyword)
+
+        # Validate that the D-Bus signature matches the number of remaining
+        # Python arguments, element for element.
+        if in_signature:
+            in_sig = tuple(Signature(in_signature))
+
+            if len(in_sig) > len(args):
+                raise ValueError('input signature is longer than the number of arguments taken')
+            elif len(in_sig) < len(args):
+                raise ValueError('input signature is shorter than the number of arguments taken')
+
+        # Attach the metadata that the dbus.service dispatch machinery (and
+        # the Introspect() implementation) reads back off the function.
+        func._dbus_is_method = True
+        func._dbus_async_callbacks = async_callbacks
+        func._dbus_interface = dbus_interface
+        func._dbus_in_signature = in_signature
+        func._dbus_out_signature = out_signature
+        func._dbus_sender_keyword = sender_keyword
+        func._dbus_path_keyword = path_keyword
+        func._dbus_rel_path_keyword = rel_path_keyword
+        func._dbus_destination_keyword = destination_keyword
+        func._dbus_message_keyword = message_keyword
+        func._dbus_connection_keyword = connection_keyword
+        func._dbus_args = args
+        func._dbus_get_args_options = dict(byte_arrays=byte_arrays)
+        # 'utf8_strings' is only meaningful on Python 2; reject it
+        # explicitly elsewhere so typos do not pass silently.
+        if is_py2:
+            func._dbus_get_args_options['utf8_strings'] = kwargs.get(
+                'utf8_strings', False)
+        elif 'utf8_strings' in kwargs:
+            raise TypeError("unexpected keyword argument 'utf8_strings'")
+        return func
+
+    return decorator
+
+
+def signal(dbus_interface, signature=None, path_keyword=None,
+           rel_path_keyword=None):
+    """Factory for decorators used to mark methods of a `dbus.service.Object`
+    to emit signals on the D-Bus.
+
+    Whenever the decorated method is called in Python, after the method
+    body is executed, a signal with the same name as the decorated method,
+    with the given D-Bus interface, will be emitted from this object.
+
+    :Parameters:
+        `dbus_interface` : str
+            The D-Bus interface whose signal is emitted
+        `signature` : str
+            The signature of the signal in the usual D-Bus notation
+
+        `path_keyword` : str or None
+            A keyword argument to the decorated method. If not None,
+            that argument will not be emitted as an argument of
+            the signal, and when the signal is emitted, it will appear
+            to come from the object path given by the keyword argument.
+
+            Note that when calling the decorated method, you must always
+            pass in the object path as a keyword argument, not as a
+            positional argument.
+
+            This keyword argument cannot be used on objects where
+            the class attribute ``SUPPORTS_MULTIPLE_OBJECT_PATHS`` is true.
+
+            :Deprecated: since 0.82.0. Use `rel_path_keyword` instead.
+
+        `rel_path_keyword` : str or None
+            A keyword argument to the decorated method. If not None,
+            that argument will not be emitted as an argument of
+            the signal.
+
+            When the signal is emitted, if the named keyword argument is given,
+            the signal will appear to come from the object path obtained by
+            appending the keyword argument to the object's object path.
+            This is useful to implement "fallback objects" (objects which
+            own an entire subtree of the object-path tree).
+
+            If the object is available at more than one object-path on the
+            same or different connections, the signal will be emitted at
+            an appropriate object-path on each connection - for instance,
+            if the object is exported at /abc on connection 1 and at
+            /def and /x/y/z on connection 2, and the keyword argument is
+            /foo, then signals will be emitted from /abc/foo and /def/foo
+            on connection 1, and /x/y/z/foo on connection 2.
+
+            :Since: 0.82.0
+    """
+    validate_interface_name(dbus_interface)
+
+    if path_keyword is not None:
+        from warnings import warn
+        warn(DeprecationWarning('dbus.service.signal::path_keyword has been '
+                                'deprecated since dbus-python 0.82.0, and '
+                                'will not work on objects that support '
+                                'multiple object paths'),
+             DeprecationWarning, stacklevel=2)
+        if rel_path_keyword is not None:
+            raise TypeError('dbus.service.signal::path_keyword and '
+                            'rel_path_keyword cannot both be used')
+
+    def decorator(func):
+        member_name = func.__name__
+        validate_member_name(member_name)
+
+        def emit_signal(self, *args, **keywords):
+            abs_path = None
+            if path_keyword is not None:
+                if self.SUPPORTS_MULTIPLE_OBJECT_PATHS:
+                    raise TypeError('path_keyword cannot be used on the '
+                                    'signals of an object that supports '
+                                    'multiple object paths')
+                abs_path = keywords.pop(path_keyword, None)
+                # NOTE(review): if the caller omits the path keyword,
+                # abs_path is None here and the startswith() test below
+                # raises TypeError -- presumably callers always pass it
+                # (this matches upstream dbus-python); verify.
+                if (abs_path != self.__dbus_object_path__ and
+                    not self.__dbus_object_path__.startswith(abs_path + '/')):
+                    # Interpolate explicitly: previously the values were
+                    # passed as extra exception arguments and the message
+                    # was never %-formatted.
+                    raise ValueError('Path %r is not below %r' % (
+                        abs_path, self.__dbus_object_path__))
+
+            rel_path = None
+            if rel_path_keyword is not None:
+                rel_path = keywords.pop(rel_path_keyword, None)
+
+            # Run the decorated method body first, then emit the signal.
+            func(self, *args, **keywords)
+
+            for location in self.locations:
+                if abs_path is None:
+                    # non-deprecated case
+                    if rel_path is None or rel_path in ('/', ''):
+                        object_path = location[1]
+                    else:
+                        # will be validated by SignalMessage ctor in a moment
+                        object_path = location[1] + rel_path
+                else:
+                    object_path = abs_path
+
+                message = SignalMessage(
+                    object_path, dbus_interface, member_name)
+                message.append(signature=signature, *args)
+
+                location[0].send_message(message)
+        # end emit_signal
+
+        # Use getfullargspec() just like method() above: getargspec()
+        # raises ValueError for functions with annotations on Python 3
+        # (and was removed in 3.11), which would defeat the purpose of
+        # this annotation-tolerant copy of the dbus-python decorators.
+        if hasattr(func, '__wrapped__'):
+            args = inspect.getfullargspec(func.__wrapped__)[0]
+        else:
+            args = inspect.getfullargspec(func)[0]
+        # Drop the implicit 'self' parameter.
+        args.pop(0)
+
+        for keyword in rel_path_keyword, path_keyword:
+            if keyword is not None:
+                try:
+                    args.remove(keyword)
+                except ValueError:
+                    raise ValueError('function has no argument "%s"' % keyword)
+
+        # Validate that the signal signature matches the number of
+        # remaining Python arguments.
+        if signature:
+            sig = tuple(Signature(signature))
+
+            if len(sig) > len(args):
+                raise ValueError('signal signature is longer than the number of arguments provided')
+            elif len(sig) < len(args):
+                raise ValueError('signal signature is shorter than the number of arguments provided')
+
+        # Preserve the wrapped function's identity and attach the metadata
+        # read back by dbus.service machinery and Introspect().
+        emit_signal.__name__ = func.__name__
+        emit_signal.__doc__ = func.__doc__
+        emit_signal._dbus_is_signal = True
+        emit_signal._dbus_interface = dbus_interface
+        emit_signal._dbus_signature = signature
+        emit_signal._dbus_args = args
+        return emit_signal
+
+    return decorator

=== added file 'plainbox/plainbox/impl/dbus/service.py'
--- plainbox/plainbox/impl/dbus/service.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/dbus/service.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,662 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.dbus.service` -- DBus Service support code for PlainBox
+===========================================================================
+"""
+
+import logging
+import threading
+import weakref
+
+import _dbus_bindings
+import dbus
+import dbus.service
+import dbus.exceptions
+
+from plainbox.impl.signal import Signal
+from plainbox.impl.dbus import INTROSPECTABLE_IFACE
+from plainbox.impl.dbus import OBJECT_MANAGER_IFACE
+from plainbox.impl.dbus import PROPERTIES_IFACE
+# Note: use our own version of the decorators because
+# vanilla versions choke on annotations
+from plainbox.impl.dbus.decorators import method, signal
+
+
+# This is the good old standard python property decorator
+_property = property
+
+__all__ = [
+    'Interface',
+    'Object',
+    'method',
+    'property',
+    'signal',
+]
+
+logger = logging.getLogger("plainbox.dbus")
+
+
+class InterfaceType(dbus.service.InterfaceType):
+    """
+    Subclass of :class:`dbus.service.InterfaceType` that also handles
+    properties.
+    """
+
+    def _reflect_on_property(cls, func):
+        # Build the introspection XML fragment for one property. This is
+        # a metaclass method, so the first argument is the class being
+        # reflected (mirroring _reflect_on_method / _reflect_on_signal in
+        # dbus.service.InterfaceType); it is invoked from Introspect() as
+        # self.__class__._reflect_on_property(func).
+        reflection_data = (
+            '    <property name="{}" type="{}" access="{}"/>\n').format(
+                func._dbus_property, func._signature,
+                func.dbus_access_flag)
+        return reflection_data
+
+
+# Subclass of :class:`dbus.service.Interface` that also handles properties.
+# Created by calling the metaclass directly so the class statement syntax
+# is not needed.
+Interface = InterfaceType('Interface', (dbus.service.Interface,), {})
+
+
+class property:
+    """
+    property that handles DBus stuff
+
+    A data descriptor, analogous to the builtin :class:`property`, that
+    additionally carries the D-Bus metadata (interface, property name,
+    type signature) needed to expose the attribute over D-Bus.
+    """
+
+    def __init__(self, signature, dbus_interface, dbus_property=None,
+                 setter=False):
+        """
+        Initialize new dbus_property with the given interface name.
+
+        If dbus_property is not specified it is set to the name of the
+        decorated method. In special circumstances you may wish to specify
+        alternate dbus property name explicitly.
+
+        If setter is set to True then the implicit decorated function is a
+        setter, not the default getter. This allows to define write-only
+        properties.
+        """
+        # __name__ and __doc__ are filled in by __call__() once a
+        # function is decorated.
+        self.__name__ = None
+        self.__doc__ = None
+        self._signature = signature
+        self._dbus_interface = dbus_interface
+        # May stay None here; __call__() defaults it to the decorated
+        # function's name.
+        self._dbus_property = dbus_property
+        # Getter / setter callables. Either may remain None, making the
+        # property write-only or read-only respectively (see
+        # dbus_access_flag).
+        self._getf = None
+        self._setf = None
+        self._implicit_setter = setter
+
+    def __repr__(self):
+        return "<dbus.service.property {!r}>".format(self.__name__)
+
+    @_property
+    def dbus_access_flag(self):
+        """
+        access flag of this DBus property
+
+        :returns: either "readwrite", "read" or "write"
+        :raises TypeError: if the property is ill-defined
+        """
+        if self._getf and self._setf:
+            return "readwrite"
+        elif self._getf:
+            return "read"
+        elif self._setf:
+            return "write"
+        else:
+            raise TypeError(
+                "property provides neither readable nor writable")
+
+    @_property
+    def dbus_interface(self):
+        """
+        name of the DBus interface of this DBus property
+        """
+        return self._dbus_interface
+
+    @_property
+    def dbus_property(self):
+        """
+        name of this DBus property
+        """
+        return self._dbus_property
+
+    @_property
+    def signature(self):
+        """
+        signature of this DBus property
+        """
+        return self._signature
+
+    @_property
+    def setter(self):
+        """
+        decorator for setter functions
+
+        This property can be used to decorate additional method that
+        will be used as a property setter. Otherwise properties cannot
+        be assigned.
+        """
+        def decorator(func):
+            self._setf = func
+            return self
+        return decorator
+
+    @_property
+    def getter(self):
+        """
+        decorator for getter functions
+
+        This property can be used to decorate additional method that
+        will be used as a property getter. It is only provider for parity
+        as by default, the @dbus.service.property() decorator designates
+        a getter function. This behavior can be controlled by passing
+        setter=True to the property initializer.
+        """
+        def decorator(func):
+            self._getf = func
+            return self
+        return decorator
+
+    def __call__(self, func):
+        """
+        Decorate a getter function and return the property object
+
+        This method sets __name__, __doc__ and _dbus_property.
+        """
+        self.__name__ = func.__name__
+        if self.__doc__ is None:
+            self.__doc__ = func.__doc__
+        if self._dbus_property is None:
+            self._dbus_property = func.__name__
+        if self._implicit_setter:
+            return self.setter(func)
+        else:
+            return self.getter(func)
+
+    def __get__(self, instance, owner):
+        # Class-level access returns the descriptor itself so that
+        # introspection code (e.g. Introspect()/GetAll()) can reach the
+        # D-Bus metadata.
+        if instance is None:
+            return self
+        else:
+            if self._getf is None:
+                raise dbus.exceptions.DBusException(
+                    "property is not readable")
+            return self._getf(instance)
+
+    def __set__(self, instance, value):
+        if self._setf is None:
+            raise dbus.exceptions.DBusException(
+                "property is not writable")
+        self._setf(instance, value)
+
+    # This little helper is here is to help :meth:`Object.Introspect()`
+    # figure out how to handle properties.
+    _dbus_is_property = True
+
+
+class Object(Interface, dbus.service.Object):
+    """
+    dbus.service.Object subclass that providers additional features.
+
+    This class providers the following additional features:
+
+    * Implementation of the PROPERTIES_IFACE. This includes the methods
+      Get(), Set(), GetAll() and the signal PropertiesChanged()
+
+    * Implementation of the OBJECT_MANAGER_IFACE. This includes the method
+      GetManagedObjects() and signals InterfacesAdded() and
+      InterfacesRemoved().
+
+    * Tracking of object-path-to-object association using the new
+      :meth:`find_object_by_path()` method
+
+    * Selective activation of any of the above interfaces using
+      :meth:`should_expose_interface()` method.
+
+    * Improved version of the INTROSPECTABLE_IFACE that understands properties
+    """
+
+    def __init__(self, conn=None, object_path=None, bus_name=None):
+        """
+        Initialize the object, delegating connection/path/bus-name
+        handling to dbus.service.Object.
+        """
+        dbus.service.Object.__init__(self, conn, object_path, bus_name)
+        # Objects reported by GetManagedObjects() (OBJECT_MANAGER_IFACE).
+        self._managed_object_list = []
+
+    # [ Public DBus methods of the INTROSPECTABLE_IFACE interface ]
+
+    @method(
+        dbus_interface=INTROSPECTABLE_IFACE,
+        in_signature='', out_signature='s',
+        path_keyword='object_path', connection_keyword='connection')
+    def Introspect(self, object_path, connection):
+        """
+        Return a string of XML encoding this object's supported interfaces,
+        methods and signals.
+
+        Unlike the stock dbus.service.Object.Introspect() this version
+        also reflects properties (via InterfaceType._reflect_on_property)
+        and honours should_expose_interface().
+        """
+        logger.debug("Introspect(object_path=%r)", object_path)
+        reflection_data = (
+            _dbus_bindings.DBUS_INTROSPECT_1_0_XML_DOCTYPE_DECL_NODE)
+        reflection_data += '<node name="%s">\n' % object_path
+        # _dct_entry is presumably the per-class mapping of interface name
+        # to member dictionary maintained by the dbus.service metaclass --
+        # TODO confirm against dbus.service.InterfaceType.
+        interfaces = self._dct_entry
+        for (name, funcs) in interfaces.items():
+            # Allow classes to ignore certain interfaces This is useful because
+            # this class implements all kinds of methods internally (for
+            # simplicity) but does not really advertise them all directly
+            # unless asked to.
+            if not self.should_expose_interface(name):
+                continue
+            reflection_data += '  <interface name="%s">\n' % (name)
+            for func in funcs.values():
+                # Dispatch on the marker attributes set by the method(),
+                # signal() and property decorators above.
+                if getattr(func, '_dbus_is_method', False):
+                    reflection_data += self.__class__._reflect_on_method(func)
+                elif getattr(func, '_dbus_is_signal', False):
+                    reflection_data += self.__class__._reflect_on_signal(func)
+                elif getattr(func, '_dbus_is_property', False):
+                    reflection_data += (
+                        self.__class__._reflect_on_property(func))
+            reflection_data += '  </interface>\n'
+        for name in connection.list_exported_child_objects(object_path):
+            reflection_data += '  <node name="%s"/>\n' % name
+        reflection_data += '</node>\n'
+        logger.debug("Introspect() returns: %s", reflection_data)
+        return reflection_data
+
+    # [ Public DBus methods of the PROPERTIES_IFACE interface ]
+
+    @dbus.service.method(
+        dbus_interface=dbus.PROPERTIES_IFACE,
+        in_signature="ss", out_signature="v")
+    def Get(self, interface_name, property_name):
+        """
+        Get the value of a property @property_name on interface
+        @interface_name.
+
+        :raises dbus.exceptions.DBusException:
+            if the interface or property does not exist, or if the
+            property getter raises any non-DBus exception (which is
+            logged and re-wrapped).
+        """
+        logger.debug(
+            "%r.Get(%r, %r) -> ...",
+            self, interface_name, property_name)
+        try:
+            props = self._dct_entry[interface_name]
+        except KeyError:
+            raise dbus.exceptions.DBusException(
+                dbus.PROPERTIES_IFACE,
+                "No such interface {}".format(interface_name))
+        try:
+            prop = props[property_name]
+        except KeyError:
+            raise dbus.exceptions.DBusException(
+                dbus.PROPERTIES_IFACE,
+                "No such property {}:{}".format(
+                    interface_name, property_name))
+        try:
+            # Invoke the descriptor protocol directly since 'prop' is the
+            # property object itself, not the attribute value.
+            value = prop.__get__(self, self.__class__)
+        except dbus.exceptions.DBusException:
+            # Already a proper DBus error -- propagate as-is.
+            raise
+        except Exception as exc:
+            logger.exception(
+                "runaway exception from Get(%r, %r)",
+                interface_name, property_name)
+            raise dbus.exceptions.DBusException(
+                dbus.PROPERTIES_IFACE,
+                "Unable to get property interface/property {}:{}: {!r}".format(
+                    interface_name, property_name, exc))
+        else:
+            logger.debug(
+                "%r.Get(%r, %r) -> %r",
+                self, interface_name, property_name, value)
+            return value
+
+    @dbus.service.method(
+        dbus_interface=dbus.PROPERTIES_IFACE,
+        in_signature="ssv", out_signature="")
+    def Set(self, interface_name, property_name, value):
+        """
+        Set the value of property @property_name on interface
+        @interface_name to @value.
+
+        :raises dbus.exceptions.DBusException:
+            if the interface or property does not exist, or if the
+            property setter raises any non-DBus exception (which is
+            logged and re-wrapped).
+        """
+        logger.debug(
+            "%r.Set(%r, %r, %r) -> ...",
+            self, interface_name, property_name, value)
+        try:
+            props = self._dct_entry[interface_name]
+        except KeyError:
+            raise dbus.exceptions.DBusException(
+                dbus.PROPERTIES_IFACE,
+                "No such interface {}".format(interface_name))
+        try:
+            # Map the real property name
+            # Unlike Get(), look up by the declared dbus_property name
+            # (which may differ from the Python attribute name).
+            prop = {
+                prop.dbus_property: prop
+                for prop in props.values()
+                if isinstance(prop, property)
+            }[property_name]
+            # NOTE(review): this second isinstance check looks like dead
+            # code -- the comprehension above already filtered on
+            # isinstance(prop, property); verify before removing.
+            if not isinstance(prop, property):
+                raise KeyError(property_name)
+        except KeyError:
+            raise dbus.exceptions.DBusException(
+                dbus.PROPERTIES_IFACE,
+                "No such property {}:{}".format(
+                    interface_name, property_name))
+        try:
+            prop.__set__(self, value)
+        except dbus.exceptions.DBusException:
+            raise
+        except Exception as exc:
+            logger.exception(
+                "runaway exception from %r.Set(%r, %r, %r)",
+                self, interface_name, property_name, value)
+            raise dbus.exceptions.DBusException(
+                dbus.PROPERTIES_IFACE,
+                "Unable to set property {}:{}: {!r}".format(
+                    interface_name, property_name, exc))
+        logger.debug(
+            "%r.Set(%r, %r, %r) -> None",
+            self, interface_name, property_name, value)
+
+    @dbus.service.method(
+        dbus_interface=dbus.PROPERTIES_IFACE,
+        in_signature="s", out_signature="a{sv}")
+    def GetAll(self, interface_name):
+        """
+        Get all readable properties of interface @interface_name.
+
+        Properties whose getter raises are logged and omitted from the
+        result rather than failing the whole call.
+
+        :raises dbus.exceptions.DBusException:
+            if the interface does not exist.
+        """
+        logger.debug("%r.GetAll(%r)", self, interface_name)
+        try:
+            props = self._dct_entry[interface_name]
+        except KeyError:
+            raise dbus.exceptions.DBusException(
+                dbus.PROPERTIES_IFACE,
+                "No such interface {}".format(interface_name))
+        result = {}
+        for prop in props.values():
+            # Skip methods and signals; only property descriptors count.
+            if not isinstance(prop, property):
+                continue
+            prop_name = prop.dbus_property
+            try:
+                prop_value = prop.__get__(self, self.__class__)
+            except Exception:
+                # Narrowed from a bare 'except:' which would also swallow
+                # SystemExit and KeyboardInterrupt. Keep the best-effort
+                # behavior: log and skip the unreadable property.
+                logger.exception(
+                    "Unable to read property %r from %r", prop, self)
+            else:
+                result[prop_name] = prop_value
+        return result
+
+    @dbus.service.signal(
+        dbus_interface=dbus.PROPERTIES_IFACE,
+        signature='sa{sv}as')
+    def PropertiesChanged(
+            self, interface_name, changed_properties, invalidated_properties):
+        """
+        Standard org.freedesktop.DBus.Properties.PropertiesChanged signal.
+
+        The body only logs; the actual emission is performed by the
+        @dbus.service.signal decorator after this method returns.
+        """
+        logger.debug(
+            "PropertiesChanged(%r, %r, %r)",
+            interface_name, changed_properties, invalidated_properties)
+
+    # [ Public DBus methods of the OBJECT_MANAGER_IFACE interface ]
+
+    @dbus.service.method(
+        dbus_interface=OBJECT_MANAGER_IFACE,
+        in_signature="", out_signature="a{oa{sa{sv}}}")
+    def GetManagedObjects(self):
+        """
+        Standard org.freedesktop.DBus.ObjectManager.GetManagedObjects.
+
+        Returns a mapping from each managed object to a mapping of
+        interface name -> property dict (interfaces with no readable
+        properties are omitted).
+        """
+        logger.debug("%r.GetManagedObjects() -> ...", self)
+        result = {}
+        for obj in self._managed_object_list:
+            logger.debug("Looking for stuff exported by %r", obj)
+            # NOTE(review): keys are the exported objects themselves even
+            # though the signature says 'o' (object path) -- presumably
+            # dbus-python marshals an exported object to its path; verify.
+            result[obj] = {}
+            for iface_name in obj._dct_entry.keys():
+                props = obj.GetAll(iface_name)
+                if len(props):
+                    result[obj][iface_name] = props
+        logger.debug("%r.GetManagedObjects() -> %r", self, result)
+        return result
+
+    @dbus.service.signal(
+        dbus_interface=OBJECT_MANAGER_IFACE,
+        signature='oa{sa{sv}}')
+    def InterfacesAdded(self, object_path, interfaces_and_properties):
+        """
+        Standard ObjectManager.InterfacesAdded signal; body only logs,
+        emission is handled by the decorator.
+        """
+        logger.debug("%r.InterfacesAdded(%r, %r)",
+                     self, object_path, interfaces_and_properties)
+
+    @dbus.service.signal(
+        dbus_interface=OBJECT_MANAGER_IFACE, signature='oas')
+    def InterfacesRemoved(self, object_path, interfaces):
+        """
+        Standard ObjectManager.InterfacesRemoved signal; body only logs,
+        emission is handled by the decorator.
+        """
+        logger.debug("%r.InterfacesRemoved(%r, %r)",
+                     self, object_path, interfaces)
+
+    # [ Overridden methods of dbus.service.Object ]
+
+    def add_to_connection(self, connection, path):
+        """
+        Version of dbus.service.Object.add_to_connection() that keeps track of
+        all object paths.
+
+        NOTE(review): _object_path_map_lock and _object_path_to_object_map
+        are presumably class-level attributes defined further down in this
+        class (not visible here) -- verify.
+        """
+        with self._object_path_map_lock:
+            # Super-call add_to_connection(). This can fail which is
+            # okay as we haven't really modified anything yet.
+            super(Object, self).add_to_connection(connection, path)
+            # Touch self.connection, this will fail if the call above failed
+            # and self._connection (mind the leading underscore) is still None.
+            # It will also fail if the object is being exposed on multiple
+            # connections (so self._connection is _MANY). We are interested in
+            # the second check as _MANY connections are not supported here.
+            self.connection
+            # If everything is okay, just add the specified path to the
+            # _object_path_to_object_map.
+            self._object_path_to_object_map[path] = self
+
+    def remove_from_connection(self, connection=None, path=None):
+        with self._object_path_map_lock:
+            # Touch self.connection, this triggers a number of interesting
+            # checks, in particular checks for self._connection (mind the
+            # leading underscore) being _MANY or being None. Both of those
+            # throw an AttributeError that we can simply propagate at this
+            # point.
+            self.connection
+            # Create a copy of locations. This is required because locations
+            # are modified by remove_from_connection() which can also fail.  If
+            # we were to use self.locations here directly we would have to undo
+            # any changes if remove_from_connection() raises an exception.
+            # Instead it is easier to first super-call remove_from_connection()
+            # and then do what we need to at this layer, after
+            # remove_from_connection() finishes successfully.
+            locations_copy = list(self.locations)
+            # Super-call remove_from_connection()
+            super(Object, self).remove_from_connection(connection, path)
+            # If either path or connection are none then treat them like
+            # match-any wild-cards. The same logic is implemented in the
+            # superclass version of this method.
+            if path is None or connection is None:
+                # Location is a tuple of at least two elements, connection and
+                # path. There may be other elements added later so let's not
+                # assume this is a simple pair.
+                for location in locations_copy:
+                    location_conn = location[0]
+                    location_path = location[1]
+                    # If (connection matches or is None)
+                    # and (path matches or is None)
+                    # then remove that association
+                    if ((location_conn == connection or connection is None)
+                            and (path == location_path or path is None)):
+                        del self._object_path_to_object_map[location_path]
+            else:
+                # If connection and path were specified, just remove the
+                # association from the specified path.
+                del self._object_path_to_object_map[path]
+
+    # [ Custom Extension Methods ]
+
+    def should_expose_interface(self, iface_name):
+        """
+        Check if the specified interface should be exposed.
+
+        This method controls which of the interfaces are visible as implemented
+        by this Object. By default objects don't implement any interface except
+        for PEER_IFACE. There are two more interfaces that are implemented
+        internally but need to be explicitly exposed: the PROPERTIES_IFACE and
+        OBJECT_MANAGER_IFACE.
+
+        Typically subclasses should NOT override this method, instead
+        subclasses should define class-scope HIDDEN_INTERFACES as a
+        frozenset() of interface names to hide and remove one of the entries found in
+        _STD_INTERFACES from it to effectively enable that interface.
+        """
+        return iface_name not in self.HIDDEN_INTERFACES
+
+    @classmethod
+    def find_object_by_path(cls, object_path):
+        """
+        Find and return the object that is exposed as object_path on any
+        connection. Using multiple connections is not supported at this time.
+
+        .. note::
+            This obviously only works for objects exposed from the same
+            application. The main use case is to have a way to lookup object
+            paths that may be passed as arguments and also originate in the
+            same application.
+        """
+        # XXX: ideally this would be per-connection method.
+        with cls._object_path_map_lock:
+            return cls._object_path_to_object_map[object_path]
+
+    @_property
+    def managed_objects(self):
+        """
+        list of managed objects.
+
+        This collection is a part of the OBJECT_MANAGER_IFACE. While it can be
+        manipulated directly (technically) it should only be manipulated using
+        :meth:`add_managed_object()`, :meth:`add_managed_object_list()`,
+        :meth:`remove_managed_object()` and
+        :meth:`remove_managed_object_list()` as they send appropriate DBus
+        signals.
+        """
+        return self._managed_object_list
+
+    def add_managed_object(self, obj):
+        self.add_managed_object_list([obj])
+
+    def remove_managed_object(self, obj):
+        self.remove_managed_object_list([obj])
+
+    def add_managed_object_list(self, obj_list):
+        logger.debug("Adding managed objects: %s", obj_list)
+        for obj in obj_list:
+            if not isinstance(obj, Object):
+                raise TypeError("obj must be of type {!r}".format(Object))
+        old = self._managed_object_list
+        new = list(old)
+        new.extend(obj_list)
+        self._managed_object_list = new
+        self._on_managed_objects_changed(old, new)
+
+    def remove_managed_object_list(self, obj_list):
+        logger.debug("Removing managed objects: %s", obj_list)
+        for obj in obj_list:
+            if not isinstance(obj, Object):
+                raise TypeError("obj must be of type {!r}".format(Object))
+        old = self._managed_object_list
+        new = list(old)
+        for obj in obj_list:
+            new.remove(obj)
+        self._managed_object_list = new
+        self._on_managed_objects_changed(old, new)
+
+    # [ Custom Private Implementation Data ]
+
+    _STD_INTERFACES = frozenset([
+        INTROSPECTABLE_IFACE,
+        OBJECT_MANAGER_IFACE,
+        # TODO: peer interface is not implemented in this class
+        # PEER_IFACE,
+        PROPERTIES_IFACE
+    ])
+
+    HIDDEN_INTERFACES = frozenset([
+        OBJECT_MANAGER_IFACE,
+        PROPERTIES_IFACE
+    ])
+
+    # Lock protecting access to _object_path_to_object_map.
+    # XXX: ideally this would be a per-connection attribute
+    _object_path_map_lock = threading.Lock()
+
+    # Map of object_path -> dbus.service.Object instances
+    # XXX: ideally this would be a per-connection attribute
+    _object_path_to_object_map = weakref.WeakValueDictionary()
+
+    # [ Custom Private Implementation Methods ]
+
+    @_property
+    def _dct_key(self):
+        """
+        the key indexing this Object in Object.__class__._dbus_class_table
+        """
+        return self.__class__.__module__ + '.' + self.__class__.__name__
+
+    @_property
+    def _dct_entry(self):
+        """
+        same as self.__class__._dbus_class_table[self._dct_key]
+        """
+        return self.__class__._dbus_class_table[self._dct_key]
+
+    @Signal.define
+    def _on_managed_objects_changed(self, old_objs, new_objs):
+        logger.debug("%r._on_managed_objects_changed(%r, %r)",
+                     self, old_objs, new_objs)
+        for obj in frozenset(new_objs) - frozenset(old_objs):
+            ifaces_and_props = {}
+            for iface_name in obj._dct_entry.keys():
+                try:
+                    props = obj.GetAll(iface_name)
+                except dbus.exceptions.DBusException as exc:
+                    logger.warning("Caught %r", exc)
+                else:
+                    if len(props):
+                        ifaces_and_props[iface_name] = props
+            self.InterfacesAdded(obj.__dbus_object_path__, ifaces_and_props)
+        for obj in frozenset(old_objs) - frozenset(new_objs):
+            ifaces = list(obj._dct_entry.keys())
+            self.InterfacesRemoved(obj.__dbus_object_path__, ifaces)
+
+
+class ObjectWrapper(Object):
+    """
+    Wrapper for a single python object which makes it easier to expose over
+    DBus as a service. The object should be injected into something that
+    extends dbus.service.Object class.
+
+    The class maintains an association between each wrapper and native object
+    and offers methods for converting between the two.
+    """
+
+    # Lock protecting access to _native_id_to_wrapper_map
+    _native_id_map_lock = threading.Lock()
+
+    # Map of id(wrapper.native) -> wrapper
+    _native_id_to_wrapper_map = weakref.WeakValueDictionary()
+
+    def __init__(self, native, conn=None, object_path=None, bus_name=None):
+        """
+        Create a new wrapper for the specified native object
+        """
+        super(ObjectWrapper, self).__init__(conn, object_path, bus_name)
+        with self._native_id_map_lock:
+            self._native_id_to_wrapper_map[id(native)] = self
+        self._native = native
+
+    @_property
+    def native(self):
+        """
+        native python object being wrapped by this wrapper
+        """
+        return self._native
+
+    @classmethod
+    def find_wrapper_by_native(cls, native):
+        """
+        Find the wrapper associated with the specified native object
+        """
+        with cls._native_id_map_lock:
+            return cls._native_id_to_wrapper_map[id(native)]

=== modified file 'plainbox/plainbox/impl/exporter/__init__.py'
--- plainbox/plainbox/impl/exporter/__init__.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/exporter/__init__.py	2013-09-13 17:12:45 +0000
@@ -148,6 +148,9 @@
                 continue
             data['result_map'][job_name] = OrderedDict()
             data['result_map'][job_name]['outcome'] = job_state.result.outcome
+            if job_state.result.execution_duration:
+                data['result_map'][job_name]['execution_duration'] = \
+                    job_state.result.execution_duration
             if self.OPTION_WITH_COMMENTS in self._option_list:
                 data['result_map'][job_name]['comments'] = \
                     job_state.result.comments
@@ -155,12 +158,12 @@
             # Add Parent hash if requested
             if self.OPTION_WITH_JOB_VIA in self._option_list:
                 data['result_map'][job_name]['via'] = \
-                    job_state.result.job.via
+                    job_state.job.via
 
             # Add Job hash if requested
             if self.OPTION_WITH_JOB_HASH in self._option_list:
                 data['result_map'][job_name]['hash'] = \
-                    job_state.result.job.get_checksum()
+                    job_state.job.get_checksum()
 
             # Add Job definitions if requested
             if self.OPTION_WITH_JOB_DEFS in self._option_list:
@@ -170,17 +173,17 @@
                              'command',
                              'description',
                              ):
-                    if not getattr(job_state.result.job, prop):
+                    if not getattr(job_state.job, prop):
                         continue
                     data['result_map'][job_name][prop] = getattr(
-                        job_state.result.job, prop)
+                        job_state.job, prop)
 
             # Add Attachments if requested
-            if job_state.result.job.plugin == 'attachment':
+            if job_state.job.plugin == 'attachment':
                 if self.OPTION_WITH_ATTACHMENTS in self._option_list:
                     raw_bytes = b''.join(
                         (record[2] for record in
-                         job_state.result.io_log if record[1] == 'stdout'))
+                         job_state.result.get_io_log() if record[1] == 'stdout'))
                     data['attachment_map'][job_name] = \
                         base64.standard_b64encode(raw_bytes).decode('ASCII')
                 continue  # Don't add attachments IO logs to the result_map
@@ -191,12 +194,12 @@
                 # saved, discarding stream name and the relative timestamp.
                 if self.OPTION_SQUASH_IO_LOG in self._option_list:
                     io_log_data = self._squash_io_log(
-                        job_state.result.io_log)
+                        job_state.result.get_io_log())
                 elif self.OPTION_FLATTEN_IO_LOG in self._option_list:
                     io_log_data = self._flatten_io_log(
-                        job_state.result.io_log)
+                        job_state.result.get_io_log())
                 else:
-                    io_log_data = self._io_log(job_state.result.io_log)
+                    io_log_data = self._io_log(job_state.result.get_io_log())
                 data['result_map'][job_name]['io_log'] = io_log_data
         return data
 
@@ -288,8 +291,10 @@
     for entry_point in sorted(iterator, key=lambda ep: ep.name):
         try:
             exporter_cls = entry_point.load()
+        except pkg_resources.DistributionNotFound as exc:
+            logger.info("Unable to load %s: %s", entry_point, exc)
         except ImportError as exc:
-            logger.exception("Unable to import {}: {}", entry_point, exc)
+            logger.exception("Unable to import %s: %s", entry_point, exc)
         else:
             exporter_map[entry_point.name] = exporter_cls
     return exporter_map

=== added file 'plainbox/plainbox/impl/exporter/html.py'
--- plainbox/plainbox/impl/exporter/html.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/exporter/html.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,145 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Sylvain Pineau <sylvain.pineau@xxxxxxxxxxxxx>
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#   Daniel Manrique <daniel.manrique@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.exporter.html`
+==================================
+
+HTML exporter for human consumption
+
+.. warning::
+    THIS MODULE DOES NOT HAVE A STABLE PUBLIC API
+"""
+
+from string import Template
+import base64
+import logging
+import mimetypes
+
+from lxml import etree as ET
+from pkg_resources import resource_filename
+
+from plainbox.impl.exporter.xml import XMLSessionStateExporter
+
+
+logger = logging.getLogger("plainbox.exporter.html")
+
+
+class HTMLResourceInliner(object):
+    """ A helper class to inline resources referenced in an lxml tree.
+    """
+    def _resource_content(self, url):
+        try:
+            with open(url, 'rb') as f:
+                file_contents = f.read()
+        except (IOError, OSError):
+            logger.warning("Unable to load resource %s, not inlining",
+                           url)
+            return ""
+        type, encoding = mimetypes.guess_type(url)
+        if not encoding:
+            encoding = "utf-8"
+        if type in("text/css", "application/javascript"):
+            return file_contents.decode(encoding)
+        elif type in("image/png", "image/jpg"):
+            b64_data = base64.b64encode(file_contents)
+            b64_data = b64_data.decode("ascii")
+            return_string = "data:{};base64,{}".format(type, b64_data)
+            return return_string
+        else:
+            logger.warning("Resource of type %s unknown", type)
+            #Strip it out, better not to have it.
+            return ""
+
+    def inline_resources(self, document_tree):
+        """
+        Replace references to external resources by an in-place (inlined)
+        representation of each resource.
+
+        Currently images, stylesheets and scripts are inlined.
+
+        Only local (i.e. file) resources/locations are supported. If a
+        non-local resource is requested for inlining, it will be removed
+        (replaced by a blank string), with the goal that the resulting
+        lxml tree will not reference any unreachable resources.
+
+        :param document_tree:
+            lxml tree to process.
+
+        :returns:
+            lxml tree with some elements replaced by their inlined
+            representation.
+        """
+        # Try inlining using result_tree here.
+        for node in document_tree.xpath('//script'):
+            # These have a src attribute, need to remove the
+            # attribute and add the content of the src file
+            # as the node's text
+            src = node.attrib.pop('src')
+            node.text = self._resource_content(src)
+
+        for node in document_tree.xpath('//link[@rel="stylesheet"]'):
+            # These have a href attribute and need to be completely replaced
+            # by a new <style> node with contents of the href file
+            # as its text.
+            src = node.attrib.pop('href')
+            type = node.attrib.pop('type')
+            style_elem = ET.Element("style")
+            style_elem.attrib['type'] = type
+            style_elem.text = self._resource_content(src)
+            node.getparent().append(style_elem)
+            # Now zorch the existing node
+            node.getparent().remove(node)
+
+        for node in document_tree.xpath('//img'):
+            # src attribute points to a file and needs to
+            # contain the base64 encoded version of that file.
+            src = node.attrib.pop('src')
+            node.attrib['src'] = self._resource_content(src)
+        return document_tree
+
+
+class HTMLSessionStateExporter(XMLSessionStateExporter):
+    """
+    Session state exporter creating HTML documents.
+
+    It basically applies an xslt to the XMLSessionStateExporter output,
+    and then inlines some resources to produce a monolithic report in a
+    single file.
+    """
+
+    def dump(self, data, stream):
+        """
+        Public method to dump the HTML report to a stream
+        """
+        root = self.get_root_element(data)
+        self.xslt_filename = resource_filename(
+            "plainbox", "data/report/checkbox.xsl")
+        template_substitutions = {
+            'PLAINBOX_ASSETS': resource_filename("plainbox", "data/")}
+        with open(self.xslt_filename, encoding="UTF-8") as xslt_file:
+            xslt_template = Template(xslt_file.read())
+        xslt_data = xslt_template.substitute(template_substitutions)
+        xslt_root = ET.XML(xslt_data)
+        transformer = ET.XSLT(xslt_root)
+        r_tree = transformer(root)
+        inlined_result_tree = HTMLResourceInliner().inline_resources(r_tree)
+        stream.write(ET.tostring(inlined_result_tree, pretty_print=True))

=== added file 'plainbox/plainbox/impl/exporter/test_html.py'
--- plainbox/plainbox/impl/exporter/test_html.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/exporter/test_html.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,142 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Sylvain Pineau <sylvain.pineau@xxxxxxxxxxxxx>
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#   Daniel Manrique <daniel.manrique@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+plainbox.impl.exporter.test_html
+================================
+
+Test definitions for plainbox.impl.exporter.html module
+"""
+from io import StringIO
+from string import Template
+from unittest import TestCase
+import io
+
+from lxml import etree as ET
+from pkg_resources import resource_filename
+from pkg_resources import resource_string
+
+from plainbox.testing_utils import resource_json
+from plainbox.impl.exporter.html import HTMLResourceInliner
+from plainbox.impl.exporter.html import HTMLSessionStateExporter
+
+
+class HTMLInlinerTests(TestCase):
+    def setUp(self):
+        template_substitutions = {
+            'PLAINBOX_ASSETS': resource_filename("plainbox", "data/")}
+        test_file_location = "test-data/html-exporter/html-inliner.html"
+        test_file = resource_filename("plainbox",
+                                      test_file_location)
+        with open(test_file) as html_file:
+            html_template = Template(html_file.read())
+        html_content = html_template.substitute(template_substitutions)
+        self.tree = ET.parse(StringIO(html_content), ET.HTMLParser())
+        # Now self.tree contains a tree with adequately-substituted
+        # paths and resources
+        inliner = HTMLResourceInliner()
+        self.inlined_tree = inliner.inline_resources(self.tree)
+
+    def test_script_inlining(self):
+        """Test that a <script> resource gets inlined."""
+        for node in self.inlined_tree.xpath('//script'):
+            self.assertTrue(node.text)
+
+    def test_img_inlining(self):
+        """
+        Test that a <img> gets inlined.
+        It should be replaced by a base64 representation of the
+        referenced image's data as per RFC2397.
+        """
+        for node in self.inlined_tree.xpath('//img'):
+            # Skip image that purposefully points to a remote
+            # resource
+            if node.attrib.get('class') != "remote_resource":
+                self.assertTrue("base64" in node.attrib['src'])
+
+    def test_css_inlining(self):
+        """Test that a <style> resource gets inlined."""
+        for node in self.inlined_tree.xpath('//style'):
+            # Skip a fake remote_resource node that's purposefully
+            # not inlined
+            if not 'nonexistent_resource' in node.attrib['type']:
+                self.assertTrue("body" in node.text)
+
+    def test_remote_resource_inlining(self):
+        """
+        Test that a resource with a non-local (i.e. not file://
+        url) does NOT get inlined (rather it's replaced by an
+        empty string). We use <style> in this test.
+        """
+        for node in self.inlined_tree.xpath('//style'):
+            # The not-inlined remote_resource
+            if 'nonexistent_resource' in node.attrib['type']:
+                self.assertTrue(node.text == "")
+
+    def test_unfindable_file_inlining(self):
+        """
+        Test that a resource whose file does not exist does NOT
+        get inlined, and is instead replaced by empty string.
+        We use <img> in this test.
+        """
+        for node in self.inlined_tree.xpath('//img'):
+            if node.attrib.get('class') == "remote_resource":
+                self.assertEqual("", node.attrib['src'])
+
+
+class HTMLExporterTests(TestCase):
+
+    def setUp(self):
+        data = resource_json(
+            "plainbox", "test-data/xml-exporter/example-data.json",
+            exact=True)
+        exporter = HTMLSessionStateExporter(
+            system_id="",
+            timestamp="2012-12-21T12:00:00",
+            client_version="1.0")
+        stream = io.BytesIO()
+        exporter.dump(data, stream)
+        self.actual_result = stream.getvalue()  # This is bytes
+        self.assertIsInstance(self.actual_result, bytes)
+
+    def test_html_output(self):
+        """
+        Test that output from the exporter is HTML (or at least,
+        appears to be).
+        """
+        # A pretty simplistic test since we just validate the output
+        # appears to be HTML. Looking at the exporter's code, it's mostly
+        # boilerplate use of lxml and etree, so let's not fall into testing
+        # an external library.
+        self.assertIn(b"<html>",
+                      self.actual_result)
+        self.assertIn(b"<title>System Testing Report</title>",
+                      self.actual_result)
+
+    def test_perfect_match(self):
+        """
+        Test that output from the exporter exactly matches known
+        good HTML output, inlining and everything included.
+        """
+        expected_result = resource_string(
+            "plainbox", "test-data/html-exporter/example-data.html"
+        )  # unintuitively, resource_string returns bytes
+        self.assertEqual(self.actual_result, expected_result)

=== modified file 'plainbox/plainbox/impl/exporter/test_init.py'
--- plainbox/plainbox/impl/exporter/test_init.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/exporter/test_init.py	2013-09-13 17:12:45 +0000
@@ -29,13 +29,14 @@
 from tempfile import TemporaryDirectory
 from unittest import TestCase
 
+from plainbox.abc import IJobResult
 from plainbox.impl.exporter import ByteStringStreamTranslator
 from plainbox.impl.exporter import SessionStateExporterBase
 from plainbox.impl.exporter import classproperty
 from plainbox.impl.job import JobDefinition
-from plainbox.impl.result import JobResult, IOLogRecord
+from plainbox.impl.result import MemoryJobResult, IOLogRecord
 from plainbox.impl.session import SessionState
-from plainbox.impl.testing_utils import make_io_log, make_job, make_job_result
+from plainbox.impl.testing_utils import make_job, make_job_result
 
 
 class ClassPropertyTests(TestCase):
@@ -76,8 +77,8 @@
         job_b = make_job('job_b')
         session = SessionState([job_a, job_b])
         session.update_desired_job_list([job_a, job_b])
-        result_a = make_job_result(job_a, 'pass')
-        result_b = make_job_result(job_b, 'fail')
+        result_a = make_job_result(outcome=IJobResult.OUTCOME_PASS)
+        result_b = make_job_result(outcome=IJobResult.OUTCOME_FAIL)
         session.update_job_result(job_a, result_a)
         session.update_job_result(job_b, result_b)
         return session
@@ -115,22 +116,16 @@
         })
         session = SessionState([job_a, job_b])
         session.update_desired_job_list([job_a, job_b])
-        result_a = JobResult({
-            'job': job_a,
-            'outcome': 'pass',
+        result_a = MemoryJobResult({
+            'outcome': IJobResult.OUTCOME_PASS,
             'return_code': 0,
-            'io_log': make_io_log(
-                (IOLogRecord(0, 'stdout', b'testing\n'),),
-                session_dir)
+            'io_log': [(0, 'stdout', b'testing\n')],
         })
-        result_b = JobResult({
-            'job': job_b,
-            'outcome': 'pass',
+        result_b = MemoryJobResult({
+            'outcome': IJobResult.OUTCOME_PASS,
             'return_code': 0,
             'comments': 'foo',
-            'io_log': make_io_log(
-                (IOLogRecord(0, 'stdout', b'ready: yes\n'),),
-                session_dir)
+            'io_log': [(0, 'stdout', b'ready: yes\n')],
         })
         session.update_job_result(job_a, result_a)
         session.update_job_result(job_b, result_b)

=== added file 'plainbox/plainbox/impl/exporter/xlsx.py'
--- plainbox/plainbox/impl/exporter/xlsx.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/exporter/xlsx.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,570 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Sylvain Pineau <sylvain.pineau@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.exporter.xlsx`
+==================================
+
+XLSX exporter
+
+.. warning::
+    THIS MODULE DOES NOT HAVE A STABLE PUBLIC API
+"""
+
+from base64 import standard_b64decode
+from collections import defaultdict, OrderedDict
+import re
+
+from plainbox.impl.exporter import SessionStateExporterBase
+from plainbox.abc import IJobResult
+from xlsxwriter.workbook import Workbook
+from xlsxwriter.utility import xl_rowcol_to_cell
+
+
+class XLSXSessionStateExporter(SessionStateExporterBase):
+    """
+    Session state exporter creating XLSX documents
+
+    The hardware devices are extracted from the content of the following
+    attachment:
+        * lspci_attachment
+
+    The following resource jobs are needed to populate the system info section
+    of this report:
+        * dmi
+        * device
+        * cpuinfo
+        * meminfo
+        * package
+    """
+
+    OPTION_WITH_SYSTEM_INFO = 'with-sys-info'
+    OPTION_WITH_SUMMARY = 'with-summary'
+    OPTION_WITH_DESCRIPTION = 'with-job-description'
+    OPTION_WITH_TEXT_ATTACHMENTS = 'with-text-attachments'
+
+    SUPPORTED_OPTION_LIST = (
+        OPTION_WITH_SYSTEM_INFO,
+        OPTION_WITH_SUMMARY,
+        OPTION_WITH_DESCRIPTION,
+        OPTION_WITH_TEXT_ATTACHMENTS,
+    )
+
+    def __init__(self, option_list=None):
+        """
+        Initialize a new XLSXSessionStateExporter.
+        """
+        # Super-call with empty option list
+        super(XLSXSessionStateExporter, self).__init__(())
+        # All the "options" are simply required configuration elements and are
+        # not optional in any way. There is no way to opt-out.
+        if option_list is None:
+            option_list = ()
+        for option in option_list:
+            if option not in self.supported_option_list:
+                raise ValueError("Unsupported option: {}".format(option))
+        self._option_list = (
+            SessionStateExporterBase.OPTION_WITH_IO_LOG,
+            SessionStateExporterBase.OPTION_FLATTEN_IO_LOG,
+            SessionStateExporterBase.OPTION_WITH_JOB_DEFS,
+            SessionStateExporterBase.OPTION_WITH_JOB_VIA,
+            SessionStateExporterBase.OPTION_WITH_JOB_HASH,
+            SessionStateExporterBase.OPTION_WITH_RESOURCE_MAP,
+            SessionStateExporterBase.OPTION_WITH_ATTACHMENTS)
+        self._option_list += tuple(option_list)
+        self.total_pass = 0
+        self.total_fail = 0
+        self.total_skip = 0
+        self.total = 0
+
+    def _set_formats(self):
+        # Main Title format (Orange)
+        self.format01 = self.workbook.add_format({
+            'align': 'left', 'size': 24, 'font_color': '#DC4C00',
+        })
+        # Default font
+        self.format02 = self.workbook.add_format({
+            'align': 'left', 'valign': 'vcenter', 'size': 10,
+        })
+        # Titles
+        self.format03 = self.workbook.add_format({
+            'align': 'left', 'size': 12, 'bold': 1,
+        })
+        # Titles + borders
+        self.format04 = self.workbook.add_format({
+            'align': 'left', 'size': 12, 'bold': 1, 'border': 7
+        })
+        # System info with borders
+        self.format05 = self.workbook.add_format({
+            'align': 'left', 'valign': 'vcenter', 'text_wrap': 1, 'size': 8,
+            'border': 7,
+        })
+        # System info with borders, grayed out background
+        self.format06 = self.workbook.add_format({
+            'align': 'left', 'valign': 'vcenter', 'text_wrap': 1, 'size': 8,
+            'border': 7, 'bg_color': '#E6E6E6',
+        })
+        # Headlines (center)
+        self.format07 = self.workbook.add_format({
+            'align': 'center', 'size': 10, 'bold': 1,
+        })
+        # Table rows without borders
+        self.format08 = self.workbook.add_format({
+            'align': 'left', 'valign': 'vcenter', 'text_wrap': 1, 'size': 8,
+        })
+        # Table rows without borders, grayed out background
+        self.format09 = self.workbook.add_format({
+            'align': 'left', 'valign': 'vcenter', 'text_wrap': 1, 'size': 8,
+            'bg_color': '#E6E6E6',
+        })
+        # Green background / Size 8
+        self.format10 = self.workbook.add_format({
+            'align': 'center', 'valign': 'vcenter', 'text_wrap': 1, 'size': 8,
+            'bg_color': 'lime', 'border': 7, 'border_color': 'white',
+        })
+        # Red background / Size 8
+        self.format11 = self.workbook.add_format({
+            'align': 'center', 'valign': 'vcenter', 'text_wrap': 1, 'size': 8,
+            'bg_color': 'red', 'border': 7, 'border_color': 'white',
+        })
+        # Gray background / Size 8
+        self.format12 = self.workbook.add_format({
+            'align': 'center', 'valign': 'vcenter', 'text_wrap': 1, 'size': 8,
+            'bg_color': 'gray', 'border': 7, 'border_color': 'white',
+        })
+        # Attachments
+        self.format13 = self.workbook.add_format({
+            'align': 'left', 'valign': 'vcenter', 'text_wrap': 1, 'size': 8,
+            'font': 'Courier New',
+        })
+        # Invisible man
+        self.format14 = self.workbook.add_format({'font_color': 'white'})
+        # Headlines (left-aligned)
+        self.format15 = self.workbook.add_format({
+            'align': 'left', 'size': 10, 'bold': 1,
+        })
+        # Table rows without borders, indent level 1
+        self.format16 = self.workbook.add_format({
+            'align': 'left', 'valign': 'vcenter', 'size': 8, 'indent': 1,
+        })
+        # Table rows without borders, grayed out background, indent level 1
+        self.format17 = self.workbook.add_format({
+            'align': 'left', 'valign': 'vcenter', 'size': 8,
+            'bg_color': '#E6E6E6', 'indent': 1,
+        })
+
+    def _hw_collection(self, data):
+        hw_info = defaultdict(lambda: 'NA')
+        if 'dmi' in data['resource_map']:
+            result = ['{} {} ({})'.format(i['vendor'], i['product'],
+                      i['version']) for i in data["resource_map"]['dmi']
+                      if i['category'] == 'SYSTEM']
+            if result:
+                hw_info['platform'] = result.pop()
+            result = ['{}'.format(i['version'])
+                      for i in data["resource_map"]['dmi']
+                      if i['category'] == 'BIOS']
+            if result:
+                hw_info['bios'] = result.pop()
+        if 'cpuinfo' in data['resource_map']:
+            result = ['{} x {}'.format(i['model'], i['count'])
+                      for i in data["resource_map"]['cpuinfo']]
+            if result:
+                hw_info['processors'] = result.pop()
+        if 'lspci_attachment' in data['attachment_map']:
+            lspci = data['attachment_map']['lspci_attachment']
+            content = standard_b64decode(lspci.encode()).decode("UTF-8")
+            match = re.search('ISA bridge.*?:\s(?P<chipset>.*?)\sLPC', content)
+            if match:
+                hw_info['chipset'] = match.group('chipset')
+            match = re.search(
+                'Audio device.*?:\s(?P<audio>.*?)\s\[\w+:\w+]', content)
+            if match:
+                hw_info['audio'] = match.group('audio')
+            match = re.search(
+                'Ethernet controller.*?:\s(?P<nic>.*?)\s\[\w+:\w+]', content)
+            if match:
+                hw_info['nic'] = match.group('nic')
+            match = re.search(
+                'Network controller.*?:\s(?P<wireless>.*?)\s\[\w+:\w+]',
+                content)
+            if match:
+                hw_info['wireless'] = match.group('wireless')
+            for i, match in enumerate(re.finditer(
+                'VGA compatible controller.*?:\s(?P<video>.*?)\s\[\w+:\w+]',
+                content), start=1
+            ):
+                hw_info['video{}'.format(i)] = match.group('video')
+            vram = 0
+            for match in re.finditer(
+                    'Memory.+ prefetchable\) \[size=(?P<vram>\d+)M\]',
+                    content):
+                vram += int(match.group('vram'))
+            if vram:
+                hw_info['vram'] = '{} MiB'.format(vram)
+        if 'meminfo' in data['resource_map']:
+            result = ['{} GiB'.format(format(int(i['total']) / 1073741824,
+                      '.1f')) for i in data["resource_map"]['meminfo']]
+            if result:
+                hw_info['memory'] = result.pop()
+        if 'device' in data['resource_map']:
+            result = ['{}'.format(i['product'])
+                      for i in data["resource_map"]['device']
+                      if ('category' in i and
+                      i['category'] == 'BLUETOOTH' and 'driver' in i)]
+            if result:
+                hw_info['bluetooth'] = result.pop()
+        return hw_info
+
+    def write_systeminfo(self, data):
+        self.worksheet1.set_column(0, 0, 4)
+        self.worksheet1.set_column(1, 1, 34)
+        self.worksheet1.set_column(2, 3, 58)
+        hw_info = self._hw_collection(data)
+        self.worksheet1.write(5, 1, 'Platform Name', self.format03)
+        self.worksheet1.write(5, 2, hw_info['platform'], self.format03)
+        self.worksheet1.write(7, 1, 'BIOS', self.format04)
+        self.worksheet1.write(7, 2, hw_info['bios'], self.format06)
+        self.worksheet1.write(8, 1, 'Processors', self.format04)
+        self.worksheet1.write(8, 2, hw_info['processors'], self.format05)
+        self.worksheet1.write(9, 1, 'Chipset', self.format04)
+        self.worksheet1.write(9, 2, hw_info['chipset'], self.format06)
+        self.worksheet1.write(10, 1, 'Memory', self.format04)
+        self.worksheet1.write(10, 2, hw_info['memory'], self.format05)
+        self.worksheet1.write(11, 1, 'Video (on board)', self.format04)
+        self.worksheet1.write(11, 2, hw_info['video1'], self.format06)
+        self.worksheet1.write(12, 1, 'Video (add-on)', self.format04)
+        self.worksheet1.write(12, 2, hw_info['video2'], self.format05)
+        self.worksheet1.write(13, 1, 'Video memory', self.format04)
+        self.worksheet1.write(13, 2, hw_info['vram'], self.format06)
+        self.worksheet1.write(14, 1, 'Audio', self.format04)
+        self.worksheet1.write(14, 2, hw_info['audio'], self.format05)
+        self.worksheet1.write(15, 1, 'NIC', self.format04)
+        self.worksheet1.write(15, 2, hw_info['nic'], self.format06)
+        self.worksheet1.write(16, 1, 'Wireless', self.format04)
+        self.worksheet1.write(16, 2, hw_info['wireless'], self.format05)
+        self.worksheet1.write(17, 1, 'Bluetooth', self.format04)
+        self.worksheet1.write(17, 2, hw_info['bluetooth'], self.format06)
+        if "package" in data["resource_map"]:
+            self.worksheet1.write(19, 1, 'Packages Installed', self.format03)
+            self.worksheet1.write_row(
+                21, 1, ['Name', 'Version'], self.format07
+            )
+            for i in range(20, 22):
+                self.worksheet1.set_row(
+                    i, None, None, {'level': 1, 'hidden': True}
+                )
+            for i, pkg in enumerate(data["resource_map"]["package"]):
+                self.worksheet1.write_row(
+                    22 + i, 1,
+                    [pkg['name'], pkg['version']],
+                    self.format08 if i % 2 else self.format09
+                )
+                self.worksheet1.set_row(
+                    22 + i, None, None, {'level': 1, 'hidden': True}
+                )
+            self.worksheet1.set_row(
+                22+len(data["resource_map"]["package"]),
+                None, None, {'collapsed': True}
+            )
+
+    def write_summary(self, data):
+        self.worksheet2.set_column(0, 0, 5)
+        self.worksheet2.set_column(1, 1, 2)
+        self.worksheet2.set_column(3, 3, 27)
+        self.worksheet2.write(3, 1, 'Failures summary', self.format03)
+        self.worksheet2.write(4, 1, '✔', self.format10)
+        self.worksheet2.write(
+            4, 2,
+            '{} Tests passed - Success Rate: {:.2f}% ({}/{})'.format(
+            self.total_pass, self.total_pass / self.total * 100,
+            self.total_pass, self.total), self.format02)
+        self.worksheet2.write(5, 1, '✘', self.format11)
+        self.worksheet2.write(
+            5, 2,
+            '{} Tests failed - Failure Rate: {:.2f}% ({}/{})'.format(
+            self.total_fail, self.total_fail / self.total * 100,
+            self.total_fail, self.total), self.format02)
+        self.worksheet2.write(6, 1, '-', self.format12)
+        self.worksheet2.write(
+            6, 2,
+            '{} Tests skipped - Skip Rate: {:.2f}% ({}/{})'.format(
+            self.total_skip, self.total_skip / self.total * 100,
+            self.total_skip, self.total), self.format02)
+        self.worksheet2.write_column(
+            'L3', ['Fail', 'Skip', 'Pass'], self.format14)
+        self.worksheet2.write_column(
+            'M3', [self.total_fail, self.total_skip, self.total_pass],
+            self.format14)
+        # Configure the series.
+        chart = self.workbook.add_chart({'type': 'pie'})
+        chart.set_legend({'position': 'none'})
+        chart.add_series({
+            'points': [
+                {'fill': {'color': 'red'}},
+                {'fill': {'color': 'gray'}},
+                {'fill': {'color': 'lime'}},
+            ],
+            'categories': '=Summary!$L$3:$L$5',
+            'values': '=Summary!$M$3:$M$5'}
+        )
+        # Insert the chart into the worksheet.
+        self.worksheet2.insert_chart('F4', chart, {
+            'x_offset': 0, 'y_offset': 10, 'x_scale': 0.25, 'y_scale': 0.25
+        })
+
+    def _set_category_status(self, result_map, via, child):
+        for parent in [j for j in result_map if result_map[j]['hash'] == via]:
+            if 'category_status' not in result_map[parent]:
+                result_map[parent]['category_status'] = None
+            child_status = result_map[child]['outcome']
+            if 'category_status' in result_map[child]:
+                child_status = result_map[child]['category_status']
+            if child_status == IJobResult.OUTCOME_FAIL:
+                result_map[parent]['category_status'] = IJobResult.OUTCOME_FAIL
+            elif (
+                child_status == IJobResult.OUTCOME_PASS and
+                result_map[parent]['category_status'] != IJobResult.OUTCOME_FAIL
+            ):
+                result_map[parent]['category_status'] = IJobResult.OUTCOME_PASS
+            elif (
+                result_map[parent]['category_status'] not in
+                (IJobResult.OUTCOME_PASS, IJobResult.OUTCOME_FAIL)
+            ):
+                result_map[parent]['category_status'] = IJobResult.OUTCOME_SKIP
+
+    def _tree(self, result_map, via=None, level=0, max_level=0):
+        res = {}
+        for job_name in [j for j in result_map if result_map[j]['via'] == via]:
+            if re.search(
+                    'resource|attachment',
+                    result_map[job_name]['plugin']):
+                continue
+            level += 1
+            # Find the maximum depth of the test tree
+            if level > max_level:
+                max_level = level
+            res[job_name], max_level = self._tree(
+                result_map, result_map[job_name]['hash'], level, max_level)
+            # Generate parent categories status
+            if via is not None:
+                self._set_category_status(result_map, via, job_name)
+            level -= 1
+        return res, max_level
+
+    def _write_job(self, tree, result_map, max_level, level=0):
+        for job, children in OrderedDict(
+                sorted(
+                    tree.items(),
+                    key=lambda t: 'z' + t[0] if t[1] else 'a' + t[0])).items():
+            self._lineno += 1
+            if children:
+                self.worksheet3.write(
+                    self._lineno, level + 1,
+                    result_map[job]['description'], self.format15)
+                if (
+                    result_map[job]['category_status'] ==
+                    IJobResult.OUTCOME_PASS
+                ):
+                    self.worksheet3.write(
+                        self._lineno, max_level + 2, 'PASS', self.format10)
+                elif (
+                    result_map[job]['category_status'] ==
+                    IJobResult.OUTCOME_FAIL
+                ):
+                    self.worksheet3.write(
+                        self._lineno, max_level + 2, 'FAIL', self.format11)
+                elif (
+                    result_map[job]['category_status'] ==
+                    IJobResult.OUTCOME_SKIP
+                ):
+                    self.worksheet3.write(
+                        self._lineno, max_level + 2, 'skip', self.format12)
+                if self.OPTION_WITH_DESCRIPTION in self._option_list:
+                    self.worksheet4.write(
+                        self._lineno, level + 1,
+                        result_map[job]['description'], self.format15)
+                if level:
+                    self.worksheet3.set_row(
+                        self._lineno, 13, None, {'level': level})
+                    if self.OPTION_WITH_DESCRIPTION in self._option_list:
+                        self.worksheet4.set_row(
+                            self._lineno, 13, None, {'level': level})
+                else:
+                    self.worksheet3.set_row(self._lineno, 13)
+                    if self.OPTION_WITH_DESCRIPTION in self._option_list:
+                        self.worksheet4.set_row(self._lineno, 13)
+                self._write_job(children, result_map, max_level, level + 1)
+            else:
+                self.worksheet3.write(
+                    self._lineno, max_level + 1, job,
+                    self.format08 if self._lineno % 2 else self.format09)
+                if self.OPTION_WITH_DESCRIPTION in self._option_list:
+                    link_cell = xl_rowcol_to_cell(self._lineno, max_level + 1)
+                    self.worksheet3.write_url(
+                        self._lineno, max_level + 1,
+                        'internal:Test Descriptions!' + link_cell,
+                        self.format08 if self._lineno % 2 else self.format09,
+                        job)
+                    self.worksheet4.write(
+                        self._lineno, max_level + 1, job,
+                        self.format08 if self._lineno % 2 else self.format09)
+                self.total += 1
+                if result_map[job]['outcome'] == IJobResult.OUTCOME_PASS:
+                    self.worksheet3.write(
+                        self._lineno, max_level, '✔', self.format10)
+                    self.worksheet3.write(
+                        self._lineno, max_level + 2, 'PASS', self.format10)
+                    self.total_pass += 1
+                elif result_map[job]['outcome'] == IJobResult.OUTCOME_FAIL:
+                    self.worksheet3.write(
+                        self._lineno, max_level, '✘', self.format11)
+                    self.worksheet3.write(
+                        self._lineno, max_level + 2, 'FAIL', self.format11)
+                    self.total_fail += 1
+                elif result_map[job]['outcome'] == IJobResult.OUTCOME_SKIP:
+                    self.worksheet3.write(
+                        self._lineno, max_level, '-', self.format12)
+                    self.worksheet3.write(
+                        self._lineno, max_level + 2, 'skip', self.format12)
+                    self.total_skip += 1
+                elif result_map[job]['outcome'] == \
+                        IJobResult.OUTCOME_NOT_SUPPORTED:
+                    self.worksheet3.write(
+                        self._lineno, max_level, '-', self.format12)
+                    self.worksheet3.write(
+                        self._lineno, max_level + 2,
+                        'not supported', self.format12)
+                    self.total_skip += 1
+                else:
+                    self.worksheet3.write(
+                        self._lineno, max_level, '-',    self.format12)
+                    self.worksheet3.write(
+                        self._lineno, max_level + 2, None, self.format12)
+                    self.total_skip += 1
+                io_log = ' '
+                if result_map[job]['io_log']:
+                    io_log = standard_b64decode(
+                        result_map[job]['io_log'].encode()).decode(
+                            'UTF-8').rstrip()
+                io_lines = len(io_log.splitlines()) - 1
+                desc_lines = len(result_map[job]['description'].splitlines())
+                desc_lines -= 1
+                self.worksheet3.write(
+                    self._lineno, max_level + 3, io_log,
+                    self.format16 if self._lineno % 2 else self.format17)
+                if self.OPTION_WITH_DESCRIPTION in self._option_list:
+                    self.worksheet4.write(
+                        self._lineno, max_level + 2,
+                        result_map[job]['description'],
+                        self.format16 if self._lineno % 2 else self.format17)
+                if level:
+                    self.worksheet3.set_row(
+                        self._lineno, 12 + 9.71 * io_lines,
+                        None, {'level': level})
+                    if self.OPTION_WITH_DESCRIPTION in self._option_list:
+                        self.worksheet4.set_row(
+                            self._lineno, 12 + 9.71 * desc_lines,
+                            None, {'level': level})
+                else:
+                    self.worksheet3.set_row(self._lineno, 12 + 9.71 * io_lines)
+                    if self.OPTION_WITH_DESCRIPTION in self._option_list:
+                        self.worksheet4.set_row(
+                            self._lineno, 12 + 9.71 * desc_lines)
+
+    def write_results(self, data):
+        tree, max_level = self._tree(data['result_map'])
+        self.worksheet3.write(3, 1, 'Tests Performed', self.format03)
+        self.worksheet3.freeze_panes(6, 0)
+        self.worksheet3.set_tab_color('#DC4C00')  # Orange
+        self.worksheet3.set_column(0, 0, 5)
+        [self.worksheet3.set_column(i, i, 2) for i in range(1, max_level + 1)]
+        self.worksheet3.set_column(max_level + 1, max_level + 0, 48)
+        self.worksheet3.set_column(max_level + 2, max_level + 1, 12)
+        self.worksheet3.set_column(max_level + 3, max_level + 2, 65)
+        self.worksheet3.write_row(
+            5, max_level + 1, ['Name', 'Result', 'I/O Log'], self.format07
+        )
+        if self.OPTION_WITH_DESCRIPTION in self._option_list:
+            self.worksheet4.write(3, 1, 'Tests Descriptions', self.format03)
+            self.worksheet4.freeze_panes(6, 0)
+            self.worksheet4.set_column(0, 0, 5)
+            [self.worksheet4.set_column(i, i, 2)
+                for i in range(1, max_level + 1)]
+            self.worksheet4.set_column(max_level + 1, max_level + 0, 48)
+            self.worksheet4.set_column(max_level + 2, max_level + 1, 65)
+            self.worksheet4.write_row(
+                5, max_level + 1, ['Name', 'Description'], self.format07
+            )
+        self._lineno = 5
+        self._write_job(tree, data['result_map'], max_level)
+        self.worksheet3.autofilter(5, max_level, self._lineno, max_level + 3)
+
+    def write_attachments(self, data):
+        self.worksheet5.set_column(0, 0, 5)
+        self.worksheet5.set_column(1, 1, 120)
+        i = 4
+        for name in data['attachment_map']:
+            try:
+                content = standard_b64decode(
+                    data['attachment_map'][name].encode()).decode('UTF-8')
+            except UnicodeDecodeError:
+                # Skip binary attachments
+                continue
+            self.worksheet5.write(i, 1, name, self.format03)
+            i += 1
+            self.worksheet5.set_row(
+                i, None, None, {'level': 1, 'hidden': True}
+            )
+            j = 1
+            for line in content.splitlines():
+                self.worksheet5.write(j + i, 1, line, self.format13)
+                self.worksheet5.set_row(
+                    j + i, None, None, {'level': 1, 'hidden': True}
+                )
+                j += 1
+            self.worksheet5.set_row(i + j, None, None, {'collapsed': True})
+            i += j + 1  # Insert a newline between attachments
+
+    def dump(self, data, stream):
+        """
+        Public method to dump the XLSX report to a stream
+        """
+        self.workbook = Workbook(stream)
+        self._set_formats()
+        if self.OPTION_WITH_SYSTEM_INFO in self._option_list:
+            self.worksheet1 = self.workbook.add_worksheet('System Info')
+            self.write_systeminfo(data)
+        self.worksheet3 = self.workbook.add_worksheet('Test Results')
+        if self.OPTION_WITH_DESCRIPTION in self._option_list:
+            self.worksheet4 = self.workbook.add_worksheet('Test Descriptions')
+        self.write_results(data)
+        if self.OPTION_WITH_SUMMARY in self._option_list:
+            self.worksheet2 = self.workbook.add_worksheet('Summary')
+            self.write_summary(data)
+        if self.OPTION_WITH_TEXT_ATTACHMENTS in self._option_list:
+            self.worksheet5 = self.workbook.add_worksheet('Log Files')
+            self.write_attachments(data)
+        for worksheet in self.workbook.worksheets():
+            worksheet.outline_settings(True, False, False, True)
+            worksheet.hide_gridlines(2)
+            worksheet.fit_to_pages(1, 0)
+            worksheet.write(1, 1, 'System Testing Report', self.format01)
+            worksheet.set_row(1, 30)
+        self.workbook.close()

=== modified file 'plainbox/plainbox/impl/exporter/xml.py'
--- plainbox/plainbox/impl/exporter/xml.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/exporter/xml.py	2013-09-13 17:12:45 +0000
@@ -38,8 +38,8 @@
 from pkg_resources import resource_filename
 
 from plainbox import __version__ as version
+from plainbox.abc import IJobResult
 from plainbox.impl.exporter import SessionStateExporterBase
-from plainbox.impl.result import JobResult
 
 
 logger = logging.getLogger("plainbox.exporter.xml")
@@ -98,24 +98,26 @@
 
     SUPPORTED_OPTION_LIST = ()
 
-    # These are the job statuses allowed by the checkbox parser. 
+    # These are the job statuses allowed by the checkbox parser.
     # This is a limitation of the certification website, so we
     # have to accomodate that here.
-    _ALLOWED_STATUS = ["none",
-                       JobResult.OUTCOME_PASS,
-                       JobResult.OUTCOME_FAIL,
-                       JobResult.OUTCOME_SKIP]
+    _ALLOWED_STATUS = [
+        "none",
+        IJobResult.OUTCOME_PASS,
+        IJobResult.OUTCOME_FAIL,
+        IJobResult.OUTCOME_SKIP]
 
     # This describes mappings from all possible plainbox job statuses
     # to one of the allowed statuses listed above.
-    _STATUS_MAP = {"none": "none",
-            JobResult.OUTCOME_PASS: JobResult.OUTCOME_PASS,
-            JobResult.OUTCOME_FAIL: JobResult.OUTCOME_FAIL,
-            JobResult.OUTCOME_SKIP: JobResult.OUTCOME_SKIP,
-            JobResult.OUTCOME_NOT_SUPPORTED: JobResult.OUTCOME_SKIP}
+    _STATUS_MAP = {
+        "none": "none",
+        IJobResult.OUTCOME_PASS: IJobResult.OUTCOME_PASS,
+        IJobResult.OUTCOME_FAIL: IJobResult.OUTCOME_FAIL,
+        IJobResult.OUTCOME_SKIP: IJobResult.OUTCOME_SKIP,
+        IJobResult.OUTCOME_NOT_SUPPORTED: IJobResult.OUTCOME_SKIP}
 
-    def __init__(self, system_id=None, timestamp=None, client_version=None,
-                 client_name='plainbox'):
+    def __init__(self, option_list=None, system_id=None, timestamp=None,
+                 client_version=None, client_name='plainbox'):
         """
         Initialize a new XMLSessionStateExporter with given arguments.
 

=== added file 'plainbox/plainbox/impl/highlevel.py'
--- plainbox/plainbox/impl/highlevel.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/highlevel.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,117 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.highlevel` -- High-level API
+================================================
+"""
+
+import logging
+from threading import Thread
+from io import BytesIO
+
+from plainbox import __version__ as plainbox_version
+from plainbox.abc import IJobResult
+from plainbox.impl.exporter import get_all_exporters
+from plainbox.impl.result import MemoryJobResult
+from plainbox.impl.runner import JobRunner
+from plainbox.impl.session.legacy import SessionStateLegacyAPI as SessionState
+
+
+logger = logging.getLogger("plainbox.highlevel")
+
+
+class Service:
+
+    def __init__(self, provider_list, session_list):
+        # TODO: session_list will be changed to session_manager_list
+        self._provider_list = provider_list
+        self._session_list = session_list
+
+    @property
+    def version(self):
+        return "{}.{}.{}".format(*plainbox_version[:3])
+
+    @property
+    def provider_list(self):
+        return self._provider_list
+
+    @property
+    def session_list(self):
+        return self._session_list
+
+    def create_session(self, job_list):
+        # TODO: allocate storage
+        # TODO: construct state
+        # TODO: construct manager, binding storage and state
+        # TODO: if something fails destroy storage
+        session = SessionState(job_list)
+        session.open()
+        return session
+
+    def get_all_exporters(self):
+        return {name: exporter_cls.supported_option_list for
+                name, exporter_cls in get_all_exporters().items()}
+
+    def export_session(self, session, output_format, option_list):
+        temp_stream = BytesIO()
+        self._export_session_to_stream(session, output_format,
+                                       option_list, temp_stream)
+        return temp_stream.getvalue()
+
+    def export_session_to_file(self, session, output_format, option_list, output_file):
+        with open(output_file, 'wb') as f:
+            self._export_session_to_stream(session, output_format,
+                                       option_list, f)
+        return output_file
+
+    def _export_session_to_stream(self, session, output_format, option_list, stream):
+        exporter_cls = get_all_exporters()[output_format]
+        exporter = exporter_cls(option_list)
+        data_subset = exporter.get_session_data_subset(session)
+        exporter.dump(data_subset, stream)
+
+    def _run(self, session, job, running_job_wrapper):
+        """
+        Start a JobRunner in a separate thread
+        """
+        runner = JobRunner(
+            session.session_dir,
+            session.jobs_io_log_dir,
+            command_io_delegate=running_job_wrapper.ui_io_delegate,
+            interaction_callback=running_job_wrapper.emitAskForOutcomeSignal
+        )
+        job_state = session.job_state_map[job.name]
+        if job_state.can_start():
+            job_result = runner.run_job(job)
+        else:
+            job_result = MemoryJobResult({
+                'outcome': IJobResult.OUTCOME_NOT_SUPPORTED,
+                'comments': job_state.get_readiness_description()
+            })
+        if job_result is not None:
+            running_job_wrapper.update_job_result_callback(job, job_result)
+
+    # FIXME: broken layering, running_job_wrapper is from the dbus layer
+    def run_job(self, session, job, running_job_wrapper):
+        runner = Thread(target=self._run,
+                        args=(session, job, running_job_wrapper))
+        runner.start()
+        # FIXME: we need to keep track of this thread
+        return job

=== modified file 'plainbox/plainbox/impl/integration_tests.py'
--- plainbox/plainbox/impl/integration_tests.py	2013-04-04 17:12:13 +0000
+++ plainbox/plainbox/impl/integration_tests.py	2013-09-13 17:12:45 +0000
@@ -38,11 +38,25 @@
 from plainbox.testing_utils.cwd import TestCwd
 from plainbox.testing_utils.io import TestIO
 from plainbox.testing_utils.testcases import TestCaseWithParameters
+from plainbox.testing_utils.resource import ResourceCache
 
 
 class IntegrationTests(TestCaseWithParameters):
-
-    parameter_names = ('job_name',)
+    """
+    Test cases for checking execution and outcome of checkbox jobs.
+    Each test case is parametrized by the job name and execution "profile".
+
+    The profile is simply a string that somehow characterizes where this test
+    is applicable.
+    """
+
+    # XXX: we cannot use weak resource cache here because test parameters
+    # iterate over methods first and then over actual scenarios so our cache
+    # would constantly lose data. This might be fixable with a different
+    # implementation of test parameters but that's not a low hanging fruit.
+    cache = ResourceCache(weak=False)
+
+    parameter_names = ('scenario_pathname',)
 
     def setUp(self):
         # session data are kept in XDG_CACHE_HOME/plainbox/.session
@@ -51,24 +65,61 @@
         self._sandbox = tempfile.mkdtemp()
         self._env = os.environ
         os.environ['XDG_CACHE_HOME'] = self._sandbox
+        # Load the expected results and keep them in memory
+        self.scenario_data = self.cache.get(
+            key=('scenario_data', self.parameters.scenario_pathname),
+            operation=lambda: load_scenario_data(
+                self.parameters.scenario_pathname))
+        # Skip tests that are not applicable for the current system
+        self.skip_if_incompatible()
+        # Execute the job and remember the results.
+        (self.job_name, self.job_outcome, self.job_execution_duration,
+         self.job_return_code, self.job_stdout,
+         self.job_stderr) = self.cache.get(
+             key=('job-run-artifacts', self.parameters.scenario_pathname),
+             operation=lambda: execute_job(self.scenario_data['job_name']))
+
+    def test_job_outcome(self):
+        # Check that results match expected values
+        self.assertEqual(self.job_outcome,
+                         self.scenario_data['result']['result_map'] \
+                                           [self.job_name]['outcome'])
+
+    def test_job_return_code(self):
+        # Check the return code for correctness
+        self.assertEqual(self.job_return_code,
+                         self.scenario_data.get("return_code", 0))
+
+    def skip_if_incompatible(self):
+        """
+        Skip a job if it is incompatible with the current environment
+        """
+        if self.scenario_data.get('profile') != 'default':
+            self.skipTest("not applicable for current profile")
 
     @classmethod
-    def _gen_job_name_values(cls, package='plainbox',
-                             root='test-data/integration-tests/'):
+    def _discover_test_scenarios(cls, package='plainbox',
+                                 dirname="/test-data/integration-tests/",
+                                 extension=".json"):
         """
-        Discover job names for jobs that we have reference data for
+        Discover test scenarios.
+
+        Generates special absolute pathnames to scenario files. All those paths
+        are really relative to the plainbox package. Those pathnames are
+        suitable for pkg_resources.resource_ functions.
 
         All reference data should be dropped to
         ``plainbox/test-data/integration-tests/`` as a json file
         """
-        for name in resource_listdir(package, root):
-            resource_name = os.path.join(root, name)
-            if resource_isdir(package, resource_name):
-                for item in cls._gen_job_name_values(package, resource_name):
+        for name in resource_listdir(package, dirname):
+            resource_pathname = os.path.join(dirname, name)
+            if resource_isdir(package, resource_pathname):
+                for item in cls._discover_test_scenarios(package,
+                                                         resource_pathname,
+                                                         extension):
                     yield item
-            elif resource_name.endswith('.json'):
-                yield resource_name[
-                    len('test-data/integration-tests/'):-len('.json')]
+            elif resource_pathname.endswith(extension):
+                yield resource_pathname
 
     @classmethod
     def get_parameter_values(cls):
@@ -77,39 +128,61 @@
 
         Creates subsequent tuples for each job that has reference data
         """
-        for job_name in cls._gen_job_name_values():
-            yield (job_name,)
-
-    def test_job_result(self):
-        # Create a scratch directory so that we can save results there. The
-        # shared directory is also used for running tests as some test jobs
-        # leave junk around the current directory.
-        with TemporaryDirectory() as scratch_dir:
-            # Save results to results.json in the scratch directory
-            actual_results_path = os.path.join(scratch_dir, 'results.json')
-            # Redirect all standard IO so that the test is silent.
-            # Run the script, having relocated to the scratch directory
-            # Capture SystemExit that is always raised by main() so that
-            # we can observe the return code as well.
-            with TestIO(), TestCwd(scratch_dir),\
-                    self.assertRaises(SystemExit) as call:
-                main(['run', '-i', self.parameters.job_name,
-                      '--output-format=json', '-o', actual_results_path])
-            # Check the return code for correctness
-            self.assertEqual(call.exception.args, (0,))
-            # Load the actual results and keep them in memory
-            with open(actual_results_path, encoding='UTF-8') as stream:
-                actual_result = json.load(stream)
-        # [ At this time TestIO and TemporaryDirectory are gone ]
-        # Load the expected results and keep them in memory
-        reference_path = resource_filename(
-            "plainbox", "test-data/integration-tests/{}.json".format(
-                self.parameters.job_name))
-        with open(reference_path, encoding='UTF-8') as stream:
-            expected_result = json.load(stream)
-        # Check that results match expected values
-        self.assertEqual(actual_result, expected_result)
+        for scenario_pathname in cls._discover_test_scenarios():
+            yield (scenario_pathname,)
 
     def tearDown(self):
         shutil.rmtree(self._sandbox)
         os.environ = self._env
+
+
+def load_scenario_data(scenario_pathname):
+    """
+    Load and return scenario data.
+
+    Data is loaded from a .json file located in the plainbox package
+    directory. Individual files are named after the jobs they describe.
+    """
+    pathname = resource_filename("plainbox", scenario_pathname)
+    with open(pathname, encoding='UTF-8') as stream:
+        return json.load(stream)
+
+
+def execute_job(job_name):
+    """
+    Execute the specified job.
+
+    The job is invoked using a high-level interface from box so the test will
+    actually execute the same way as the UI would execute it. It will
+    create/tear-down appropriate session objects as well.
+
+    Returns a (job_name, job_outcome, job_execution_duration,
+    job_return_code, job_stdout, job_stderr) tuple describing the run.
+    """
+    # Create a scratch directory so that we can save results there. The
+    # shared directory is also used for running tests as some test jobs
+    # leave junk around the current directory.
+    with TemporaryDirectory() as scratch_dir:
+        # Save results to results.json in the scratch directory
+        pathname = os.path.join(scratch_dir, 'results.json')
+        # Redirect all standard IO so that the test is silent.
+        # Run the script, having relocated to the scratch directory
+        with TestIO() as io, TestCwd(scratch_dir):
+            try:
+                main(['run', '-i', job_name,
+                      '--output-format=json', '-o', pathname])
+            except SystemExit as exc:
+                # Capture SystemExit that is always raised by main() so that we
+                # can observe the return code as well.
+                job_return_code = exc.args[0]
+            else:
+                job_return_code = None
+        # Load the actual results and keep them in memory
+        with open(pathname, encoding='UTF-8') as stream:
+            job_result = json.load(stream)
+            job_outcome = job_result['result_map'][job_name]['outcome']
+            job_execution_duration = job_result['result_map'][job_name] \
+                                               ['execution_duration']
+    # [ At this time TestIO and TemporaryDirectory are gone ]
+    return (job_name, job_outcome, job_execution_duration,
+           job_return_code, io.stdout, io.stderr)

=== modified file 'plainbox/plainbox/impl/job.py'
--- plainbox/plainbox/impl/job.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/job.py	2013-09-13 17:12:45 +0000
@@ -47,30 +47,58 @@
     definition
     """
 
+    def get_record_value(self, name, default=None):
+        """
+        Obtain the value of the specified record attribute
+        """
+        value = super(JobDefinition, self).get_record_value("_{}".format(name))
+        if value is None:
+            value = super(JobDefinition, self).get_record_value(name, default)
+        return value
+
     @property
     def name(self):
-        return self.__getattr__('name')
+        return self.get_record_value('name')
 
     @property
     def requires(self):
-        try:
-            return self.__getattr__('requires')
-        except AttributeError:
-            return None
+        return self.get_record_value('requires')
 
     @property
     def description(self):
-        try:
-            return self.__getattr__('description')
-        except AttributeError:
-            return None
+        return self.get_record_value('description')
 
     @property
     def depends(self):
+        return self.get_record_value('depends')
+
+    @property
+    def estimated_duration(self):
+        """
+        estimated duration of this job in seconds.
+
+        The value may be None, which indicates that the duration is basically
+        unknown. Fractional numbers are allowed and indicate fractions of a
+        second.
+        """
+        value = self.get_record_value('estimated_duration')
+        if value is None:
+            return
         try:
-            return self.__getattr__('depends')
-        except AttributeError:
-            return None
+            return float(value)
+        except ValueError:
+            logger.warning((
+                "Incorrect value of 'estimated_duration' in job "
+                "%s read from %s"), self.name, self.origin)
+
+    @property
+    def automated(self):
+        """
+        Whether the job is fully automated and runs without any
+        intervention from the user
+        """
+        return self.plugin in ['shell', 'resource',
+                               'attachment', 'local']
 
     @property
     def via(self):
@@ -89,11 +117,11 @@
         """
         return self._origin
 
-    def __init__(self, data, origin=None, checkbox=None, via=None):
+    def __init__(self, data, origin=None, provider=None, via=None):
         super(JobDefinition, self).__init__(data)
         self._resource_program = None
         self._origin = origin
-        self._checkbox = checkbox
+        self._provider = provider
         self._via = via
 
     def __str__(self):
@@ -103,25 +131,6 @@
         return "<JobDefinition name:{!r} plugin:{!r}>".format(
             self.name, self.plugin)
 
-    def __getattr__(self, attr):
-        if attr in self._data:
-            return self._data[attr]
-        gettext_attr = "_{}".format(attr)
-        if gettext_attr in self._data:
-            value = self._data[gettext_attr]
-            # TODO: feed through gettext
-            return value
-        raise AttributeError(attr)
-
-    def _get_persistance_subset(self):
-        state = {}
-        state['data'] = {}
-        for key, value in self._data.items():
-            state['data'][key] = value
-        if self.via is not None:
-            state['via'] = self.via
-        return state
-
     def __eq__(self, other):
         if not isinstance(other, JobDefinition):
             return False
@@ -178,13 +187,12 @@
         Only the 'name' and 'plugin' keys are required.
         All other data is stored as is and is entirely optional.
         """
-        for key in ['plugin', 'name']:
-            if key not in record.data:
-                raise ValueError(
-                    "Required record key {!r} was not found".format(key))
+        if 'name' not in record.data:
+            raise ValueError("Cannot create job without a name")
         return cls(record.data, record.origin)
 
-    def modify_execution_environment(self, env, session_dir, config=None):
+    def modify_execution_environment(self, env, session_dir,
+                                     checkbox_data_dir, config=None):
         """
         Alter execution environment as required to execute this job.
 
@@ -211,20 +219,20 @@
         """
         # XXX: this obviously requires a checkbox object to know where stuff is
         # but during the transition we may not have one available.
-        assert self._checkbox is not None
+        assert self._provider is not None
         # Use PATH that can lookup checkbox scripts
-        if self._checkbox.extra_PYTHONPATH:
+        if self._provider.extra_PYTHONPATH:
             env['PYTHONPATH'] = os.pathsep.join(
-                [self._checkbox.extra_PYTHONPATH]
+                [self._provider.extra_PYTHONPATH]
                 + env.get("PYTHONPATH", "").split(os.pathsep))
         # Update PATH so that scripts can be found
         env['PATH'] = os.pathsep.join(
-            [self._checkbox.extra_PATH]
+            [self._provider.extra_PATH]
             + env.get("PATH", "").split(os.pathsep))
         # Add CHECKBOX_SHARE that is needed by one script
-        env['CHECKBOX_SHARE'] = self._checkbox.CHECKBOX_SHARE
+        env['CHECKBOX_SHARE'] = self._provider.CHECKBOX_SHARE
         # Add CHECKBOX_DATA (temporary checkbox data)
-        env['CHECKBOX_DATA'] = session_dir
+        env['CHECKBOX_DATA'] = checkbox_data_dir
         # Inject additional variables that are requested in the config
         if config is not None and config.environment is not Unset:
             for env_var in config.environment:
@@ -247,13 +255,6 @@
         2) to set the ``via`` attribute (to aid the trusted launcher)
         """
         job = self.from_rfc822_record(record)
-        job._checkbox = self._checkbox
+        job._provider = self._provider
         job._via = self.get_checksum()
         return job
-
-    @classmethod
-    def from_json_record(cls, record):
-        """
-        Create a JobDefinition instance from JSON record
-        """
-        return cls(record['data'], via=record.get('via'))

=== modified file 'plainbox/plainbox/impl/logging.py'
--- plainbox/plainbox/impl/logging.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/logging.py	2013-09-13 17:12:45 +0000
@@ -84,14 +84,23 @@
     """
 
     def setup_logging(self):
+        config_dict = self.DEFAULT_CONFIG
         # Ensure that the logging directory exists. This is important
-        # because we're about to open some files there.
+        # because we're about to open some files there. If it can't be created
+        # we fall back to a console-only config.
         if not os.path.exists(self.log_dir):
             # It seems that exists_ok is flaky
-            os.makedirs(self.log_dir, exist_ok=True)
-        # Apply the default configuration. This overrides anything currently
+            try:
+                os.makedirs(self.log_dir, exist_ok=True)
+            except OSError as error:
+                logger.warning(
+                    "Unable to create log directory: %s", self.log_dir)
+                logger.warning(("Reason: %s. All logs will go to "
+                                "console instead."), error)
+                config_dict = self.DEFAULT_CONSOLE_ONLY_CONFIG
+        # Apply the selected configuration. This overrides anything currently
         # defined for all of the logging subsystem in this python runtime
-        logging.config.dictConfig(self.DEFAULT_CONFIG)
+        logging.config.dictConfig(config_dict)
 
     def adjust_logging(self, level=None, trace_list=None, debug_console=False):
         # Bump logging on the root logger if requested
@@ -118,162 +127,271 @@
         return os.path.join(xdg_cache_home, 'plainbox', 'logs')
 
     @property
+    def DEFAULT_FORMATTERS(self):
+        """
+        Reusable dictionary with the formatter configuration plainbox uses
+        """
+        return {
+            "console_debug": {
+                "()": "plainbox.impl.logging.ANSIFormatter",
+                "format": (
+                    "{ansi.f.BLACK}{ansi.s.BRIGHT}"
+                    "%(levelname)s"
+                    "{ansi.s.NORMAL}{ansi.f.RESET}"
+                    " "
+                    "{ansi.f.CYAN}{ansi.s.DIM}"
+                    "%(name)s"
+                    "{ansi.f.RESET}{ansi.s.NORMAL}"
+                    ": "
+                    "{ansi.s.DIM}"
+                    "%(message)s"
+                    "{ansi.s.NORMAL}"
+                ),
+            },
+            "console_info": {
+                "()": "plainbox.impl.logging.ANSIFormatter",
+                "format": (
+                    "{ansi.f.WHITE}{ansi.s.BRIGHT}"
+                    "%(levelname)s"
+                    "{ansi.s.NORMAL}{ansi.f.RESET}"
+                    " "
+                    "{ansi.f.CYAN}{ansi.s.BRIGHT}"
+                    "%(name)s"
+                    "{ansi.f.RESET}{ansi.s.NORMAL}"
+                    ": "
+                    "%(message)s"
+                ),
+            },
+            "console_warning": {
+                "()": "plainbox.impl.logging.ANSIFormatter",
+                "format": (
+                    "{ansi.f.YELLOW}{ansi.s.BRIGHT}"
+                    "%(levelname)s"
+                    "{ansi.f.RESET}{ansi.s.NORMAL}"
+                    " "
+                    "{ansi.f.CYAN}%(name)s{ansi.f.RESET}"
+                    ": "
+                    "{ansi.f.WHITE}%(message)s{ansi.f.RESET}"
+                ),
+            },
+            "console_error": {
+                "()": "plainbox.impl.logging.ANSIFormatter",
+                "format": (
+                    "{ansi.f.RED}{ansi.s.BRIGHT}"
+                    "%(levelname)s"
+                    "{ansi.f.RESET}{ansi.s.NORMAL}"
+                    " "
+                    "{ansi.f.CYAN}%(name)s{ansi.f.RESET}"
+                    ": "
+                    "{ansi.f.WHITE}%(message)s{ansi.f.RESET}"
+                ),
+            },
+            "log_precise": {
+                "format": (
+                    "%(asctime)s "
+                    "[pid:%(process)s, thread:%(threadName)s, "
+                    "reltime:%(relativeCreated)dms] "
+                    "%(levelname)s %(name)s: %(message)s"
+                ),
+                "datefmt": "%Y-%m-%d %H:%M:%S",
+            },
+        }
+
+    @property
+    def DEFAULT_FILTERS(self):
+        """
+        Reusable dictionary with the filter configuration plainbox uses
+        """
+        return {
+            "only_debug": {
+                "()": "plainbox.impl.logging.LevelFilter",
+                "max_level": "DEBUG",
+            },
+            "only_info": {
+                "()": "plainbox.impl.logging.LevelFilter",
+                "min_level": "INFO",
+                "max_level": "INFO",
+            },
+            "only_warnings": {
+                "()": "plainbox.impl.logging.LevelFilter",
+                "min_level": "WARNING",
+                "max_level": "WARNING",
+            },
+        }
+
+    @property
+    def DEFAULT_HANDLERS(self):
+        """
+        Reusable dictionary with the handler configuration plainbox uses.
+        This configuration assumes the log file locations exist and are
+        writable.
+        """
+        return {
+            "console_debug": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stdout",
+                "formatter": "console_debug",
+                "filters": ["only_debug"],
+                "level": 150,
+            },
+            "console_info": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stdout",
+                "formatter": "console_info",
+                "filters": ["only_info"],
+            },
+            "console_warning": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stderr",
+                "formatter": "console_warning",
+                "filters": ["only_warnings"],
+            },
+            "console_error": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stderr",
+                "formatter": "console_error",
+                "level": "ERROR",
+            },
+            "logfile_debug": {
+                "class": "logging.handlers.RotatingFileHandler",
+                "filename": os.path.join(self.log_dir, "debug.log"),
+                "maxBytes": 32 << 20,
+                "backupCount": 3,
+                "mode": "a",
+                "formatter": "log_precise",
+                "delay": True,
+                "filters": ["only_debug"],
+            },
+            "logfile_error": {
+                "class": "logging.handlers.RotatingFileHandler",
+                "filename": os.path.join(self.log_dir, "error.log"),
+                "backupCount": 3,
+                "level": "ERROR",
+                "mode": "a",
+                "formatter": "log_precise",
+                "delay": True,
+            },
+            "logfile_crash": {
+                "class": "logging.handlers.RotatingFileHandler",
+                "filename": os.path.join(self.log_dir, "crash.log"),
+                "backupCount": 3,
+                "level": "ERROR",
+                "mode": "a",
+                "formatter": "log_precise",
+                "delay": True,
+            },
+        }
+
+    @property
+    def DEFAULT_CONSOLE_ONLY_HANDLERS(self):
+        """
+        Reusable dictionary with a handler configuration using only the
+        console for output.
+        """
+        return {
+            "console_debug": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stdout",
+                "formatter": "console_debug",
+                "filters": ["only_debug"],
+                "level": 150,
+            },
+            "console_info": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stdout",
+                "formatter": "console_info",
+                "filters": ["only_info"],
+            },
+            "console_warning": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stderr",
+                "formatter": "console_warning",
+                "filters": ["only_warnings"],
+            },
+            "console_error": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stderr",
+                "formatter": "console_error",
+                "level": "ERROR",
+            },
+        }
+
+    @property
+    def DEFAULT_LOGGERS(self):
+        """
+        Reusable dictionary with the logger configuration plainbox uses.
+        This configuration assumes the log file locations exist and are
+        writable.
+        """
+        return {
+            "plainbox": {
+                "level": "WARNING",
+                "handlers": [
+                    "console_debug",
+                    "console_info",
+                    "console_warning",
+                    "console_error",
+                    "logfile_error",
+                    "logfile_debug",
+                ],
+            },
+            "plainbox.crashes": {
+                "level": "ERROR",
+                "handlers": ["logfile_crash"],
+            },
+        }
+
+    @property
+    def DEFAULT_CONSOLE_ONLY_LOGGERS(self):
+        """
+        Reusable dictionary with a logger configuration using only the
+        console for output.
+        """
+        return {
+            "plainbox": {
+                "level": "WARNING",
+                "handlers": [
+                    "console_debug",
+                    "console_info",
+                    "console_warning",
+                    "console_error",
+                ],
+            },
+            "plainbox.crashes": {
+                "level": "ERROR",
+                "handlers": ["console_error"],
+            },
+        }
+
+    @property
     def DEFAULT_CONFIG(self):
-        return {
-            "version": 1,
-            "formatters": {
-                "console_debug": {
-                    "()": "plainbox.impl.logging.ANSIFormatter",
-                    "format": (
-                        "{ansi.f.BLACK}{ansi.s.BRIGHT}"
-                        "%(levelname)s"
-                        "{ansi.s.NORMAL}{ansi.f.RESET}"
-                        " "
-                        "{ansi.f.CYAN}{ansi.s.DIM}"
-                        "%(name)s"
-                        "{ansi.f.RESET}{ansi.s.NORMAL}"
-                        ": "
-                        "{ansi.s.DIM}"
-                        "%(message)s"
-                        "{ansi.s.NORMAL}"
-                    ),
-                },
-                "console_info": {
-                    "()": "plainbox.impl.logging.ANSIFormatter",
-                    "format": (
-                        "{ansi.f.WHITE}{ansi.s.BRIGHT}"
-                        "%(levelname)s"
-                        "{ansi.s.NORMAL}{ansi.f.RESET}"
-                        " "
-                        "{ansi.f.CYAN}{ansi.s.BRIGHT}"
-                        "%(name)s"
-                        "{ansi.f.RESET}{ansi.s.NORMAL}"
-                        ": "
-                        "%(message)s"
-                    ),
-                },
-                "console_warning": {
-                    "()": "plainbox.impl.logging.ANSIFormatter",
-                    "format": (
-                        "{ansi.f.YELLOW}{ansi.s.BRIGHT}"
-                        "%(levelname)s"
-                        "{ansi.f.RESET}{ansi.s.NORMAL}"
-                        " "
-                        "{ansi.f.CYAN}%(name)s{ansi.f.RESET}"
-                        ": "
-                        "{ansi.f.WHITE}%(message)s{ansi.f.RESET}"
-                    ),
-                },
-                "console_error": {
-                    "()": "plainbox.impl.logging.ANSIFormatter",
-                    "format": (
-                        "{ansi.f.RED}{ansi.s.BRIGHT}"
-                        "%(levelname)s"
-                        "{ansi.f.RESET}{ansi.s.NORMAL}"
-                        " "
-                        "{ansi.f.CYAN}%(name)s{ansi.f.RESET}"
-                        ": "
-                        "{ansi.f.WHITE}%(message)s{ansi.f.RESET}"
-                    ),
-                },
-                "log_precise": {
-                    "format": (
-                        "%(asctime)s "
-                        "[pid:%(process)s, thread:%(threadName)s, "
-                        "reltime:%(relativeCreated)dms] "
-                        "%(levelname)s %(name)s: %(message)s"
-                    ),
-                    "datefmt": "%Y-%m-%d %H:%M:%S",
-                },
-            },
-            "filters": {
-                "only_debug": {
-                    "()": "plainbox.impl.logging.LevelFilter",
-                    "max_level": "DEBUG",
-                },
-                "only_info": {
-                    "()": "plainbox.impl.logging.LevelFilter",
-                    "min_level": "INFO",
-                    "max_level": "INFO",
-                },
-                "only_warnings": {
-                    "()": "plainbox.impl.logging.LevelFilter",
-                    "min_level": "WARNING",
-                    "max_level": "WARNING",
-                },
-            },
-            "handlers": {
-                "console_debug": {
-                    "class": "logging.StreamHandler",
-                    "stream": "ext://sys.stdout",
-                    "formatter": "console_debug",
-                    "filters": ["only_debug"],
-                    "level": 150,
-                },
-                "console_info": {
-                    "class": "logging.StreamHandler",
-                    "stream": "ext://sys.stdout",
-                    "formatter": "console_info",
-                    "filters": ["only_info"],
-                },
-                "console_warning": {
-                    "class": "logging.StreamHandler",
-                    "stream": "ext://sys.stderr",
-                    "formatter": "console_warning",
-                    "filters": ["only_warnings"],
-                },
-                "console_error": {
-                    "class": "logging.StreamHandler",
-                    "stream": "ext://sys.stderr",
-                    "formatter": "console_error",
-                    "level": "ERROR",
-                },
-                "logfile_debug": {
-                    "class": "logging.handlers.RotatingFileHandler",
-                    "filename": os.path.join(self.log_dir, "debug.log"),
-                    "maxBytes": 32 << 20,
-                    "backupCount": 3,
-                    "mode": "a",
-                    "formatter": "log_precise",
-                    "delay": True,
-                    "filters": ["only_debug"],
-                },
-                "logfile_error": {
-                    "class": "logging.handlers.RotatingFileHandler",
-                    "filename": os.path.join(self.log_dir, "error.log"),
-                    "backupCount": 3,
-                    "level": "ERROR",
-                    "mode": "a",
-                    "formatter": "log_precise",
-                    "delay": True,
-                },
-                "logfile_crash": {
-                    "class": "logging.handlers.RotatingFileHandler",
-                    "filename": os.path.join(self.log_dir, "crash.log"),
-                    "backupCount": 3,
-                    "level": "ERROR",
-                    "mode": "a",
-                    "formatter": "log_precise",
-                    "delay": True,
-                },
-            },
-            "loggers": {
-                "plainbox": {
-                    "level": "WARNING",
-                    "handlers": [
-                        "console_debug",
-                        "console_info",
-                        "console_warning",
-                        "console_error",
-                        "logfile_error",
-                        "logfile_debug"
-                    ],
-                },
-                "plainbox.crashes": {
-                    "level": "ERROR",
-                    "handlers": ["logfile_crash"],
-                },
-            },
+        """
+        Plainbox logging configuration with logfiles and console.
+        """
+        return {
+            "version": 1,
+            "formatters": self.DEFAULT_FORMATTERS,
+            "filters": self.DEFAULT_FILTERS,
+            "handlers": self.DEFAULT_HANDLERS,
+            "loggers": self.DEFAULT_LOGGERS,
+            "root": {
+                "level": "WARNING",
+            },
+            "incremental": False,
+            "disable_existing_loggers": True,
+        }
+
+    @property
+    def DEFAULT_CONSOLE_ONLY_CONFIG(self):
+        """
+        Plainbox logging configuration with console output only.
+        """
+        return {
+            "version": 1,
+            "formatters": self.DEFAULT_FORMATTERS,
+            "filters": self.DEFAULT_FILTERS,
+            "handlers": self.DEFAULT_CONSOLE_ONLY_HANDLERS,
+            "loggers": self.DEFAULT_CONSOLE_ONLY_LOGGERS,
             "root": {
                 "level": "WARNING",
             },

=== added directory 'plainbox/plainbox/impl/providers'
=== added file 'plainbox/plainbox/impl/providers/__init__.py'
--- plainbox/plainbox/impl/providers/__init__.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/__init__.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,58 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.providers` -- providers package
+===================================================
+
+Providers are a mechanism by which PlainBox can enumerate jobs and whitelists.
+Currently there are only v1 (as in version one) providers that basically have
+to behave as CheckBox itself (mini CheckBox forks for example).
+
+There is ongoing work and discussion on V2 providers that would have a
+lower-level interface and would be able to define new job types, new whitelist
+types and generally all the next-gen semantics.
+
+PlainBox does not come with any real provider by default. PlainBox sometimes
+creates special dummy providers that have particular data in them for testing.
+
+
+V1 providers
+------------
+
+The first (current) version of PlainBox providers has the following properties,
+this is also described by :class:`IProvider1`::
+
+    * there is a directory with '.txt' or '.txt.in' files with RFC822-encoded
+      job definitions. The definitions need a particular set of keys to work.
+
+    * there is a directory with '.whitelist' files that contain a list (one per
+      line) of job definitions to execute.
+
+    * there is a directory with additional executables (added to PATH)
+
+    * there is a directory with additional python3 libraries (added to
+      PYTHONPATH)
+"""
+
+
+class ProviderNotFound(LookupError):
+    """
+    Exception used to report that a provider cannot be located
+    """

=== added file 'plainbox/plainbox/impl/providers/checkbox.py'
--- plainbox/plainbox/impl/providers/checkbox.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/checkbox.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,117 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.providers.checkbox` -- CheckBox Provider
+============================================================
+
+.. warning::
+
+    THIS MODULE DOES NOT HAVE STABLE PUBLIC API
+"""
+
+import logging
+import os
+
+from plainbox.impl import get_plainbox_dir
+from plainbox.impl.providers.v1 import Provider1
+
+
+logger = logging.getLogger("plainbox.providers.checkbox")
+
+
+class CheckBoxNotFound(LookupError):
+    """
+    Exception used to report that CheckBox cannot be located
+    """
+
+    def __repr__(self):
+        return "CheckBoxNotFound()"
+
+    def __str__(self):
+        return "CheckBox cannot be found"
+
+
+def _get_checkbox_dir():
+    """
+    Return the root directory of the checkbox source checkout
+
+    Historically plainbox used a git submodule with checkbox tree (converted to
+    git). This ended with the merge of plainbox into the checkbox tree.
+
+    Now it's the other way around and the checkbox tree can be located two
+    directories "up" from the plainbox module, in a checkbox-old directory.
+    """
+    return os.path.normpath(
+        os.path.join(
+            get_plainbox_dir(), "..", "..", "checkbox-old"))
+
+
+class CheckBoxSrcProvider(Provider1):
+    """
+    A provider for checkbox jobs when used in development mode.
+
+    This provider is only likely to be used when developing checkbox inside a
+    virtualenv environment. It assumes the particular layout of code and data
+    (relative to the code directory) directories.
+    """
+
+    def __init__(self):
+        super(CheckBoxSrcProvider, self).__init__(
+            _get_checkbox_dir(), "checkbox", "CheckBox (live source)")
+        if not os.path.exists(self._base_dir):
+            raise CheckBoxNotFound()
+
+    @property
+    def extra_PYTHONPATH(self):
+        """
+        Return additional entry for PYTHONPATH
+
+        This entry is required for CheckBox scripts to import the correct
+        CheckBox python libraries.
+        """
+        # NOTE: When CheckBox is installed then all the scripts should not use
+        # 'env' to locate the python interpreter (otherwise they might use
+        # virtualenv which is not desirable for Debian packages). When we're
+        # using CheckBox from source then the source directory (which contains
+        # the 'checkbox' package) should be added to PYTHONPATH for all the
+        # imports to work.
+        return _get_checkbox_dir()
+
+
+class CheckBoxDebProvider(Provider1):
+    """
+    A provider for checkbox jobs
+
+    This provider exposes jobs and whitelists of the system-wide installed copy
+    of checkbox.
+    """
+
+    def __init__(self):
+        super(CheckBoxDebProvider, self).__init__(
+            "/usr/share/checkbox/", "checkbox", "CheckBox")
+        if not os.path.exists(self._base_dir):
+            raise CheckBoxNotFound()
+
+
+def CheckBoxAutoProvider():
+    try:
+        return CheckBoxSrcProvider()
+    except LookupError:
+        return CheckBoxDebProvider()

=== added file 'plainbox/plainbox/impl/providers/special.py'
--- plainbox/plainbox/impl/providers/special.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/special.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,69 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.providers.special` -- Implementation of special providers
+=============================================================================
+"""
+
+import logging
+
+from plainbox.abc import IProvider1, IProviderBackend1
+from plainbox.impl.providers.checkbox import CheckBoxAutoProvider
+
+
+logger = logging.getLogger("plainbox.providers.special")
+
+
+class IHVProvider(IProvider1, IProviderBackend1):
+
+    def __init__(self, real=None):
+        if real is None:
+            real = CheckBoxAutoProvider()
+        self._real = real
+
+    @property
+    def name(self):
+        return "ihv"
+
+    @property
+    def description(self):
+        return "IHV"
+
+    def get_builtin_jobs(self):
+        # XXX: should we filter jobs too?
+        return self._real.get_builtin_jobs()
+
+    def get_builtin_whitelists(self):
+        return [
+            whitelist
+            for whitelist in self._real.get_builtin_whitelists()
+            if whitelist.name.startswith('ihv-')]
+
+    @property
+    def CHECKBOX_SHARE(self):
+        return self._real.CHECKBOX_SHARE
+
+    @property
+    def extra_PYTHONPATH(self):
+        return self._real.extra_PYTHONPATH
+
+    @property
+    def extra_PATH(self):
+        return self._real.extra_PATH

=== added directory 'plainbox/plainbox/impl/providers/stubbox'
=== added file 'plainbox/plainbox/impl/providers/stubbox/__init__.py'
--- plainbox/plainbox/impl/providers/stubbox/__init__.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/stubbox/__init__.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,43 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.providers.stubbox` -- stub job provider
+===========================================================
+
+The stubbox provider is useful for various kinds of testing where you don't
+want to pull in a volume of data, just a bit of each kind of jobs that we need
+to support.
+"""
+
+import os
+
+from plainbox.impl import get_plainbox_dir
+from plainbox.impl.providers.v1 import Provider1
+
+
+class StubBoxProvider(Provider1):
+    """
+    A provider for stub, dummy and otherwise non-production jobs.
+    """
+
+    def __init__(self):
+        super(StubBoxProvider, self).__init__(
+            os.path.join(get_plainbox_dir(), "impl/providers/stubbox"),
+            "stubbox", "StubBox (dummy data for development)")

=== added directory 'plainbox/plainbox/impl/providers/stubbox/data'
=== added directory 'plainbox/plainbox/impl/providers/stubbox/data/whitelists'
=== added file 'plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub.whitelist'
--- plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub.whitelist	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub.whitelist	2013-09-13 17:12:45 +0000
@@ -0,0 +1,12 @@
+stub/true
+stub/false
+stub/dependency/good
+stub/dependency/bad
+# stub_package
+stub/requirement/good
+stub/requirement/bad
+stub/manual
+__local__
+stub/local/true
+stub/multilevel
+stub/multilevel.*

=== added file 'plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub1.whitelist'
--- plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub1.whitelist	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub1.whitelist	2013-09-13 17:12:45 +0000
@@ -0,0 +1,7 @@
+stub/true
+stub/dependency/bad
+# stub_package
+stub/requirement/good
+stub/requirement/bad
+stub/multilevel
+stub/multilevel.*

=== added file 'plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub2.whitelist'
--- plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub2.whitelist	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/stubbox/data/whitelists/stub2.whitelist	2013-09-13 17:12:45 +0000
@@ -0,0 +1,7 @@
+stub/false
+stub/dependency/good
+stub/dependency/bad
+# stub_package
+stub/manual
+__local__
+stub/local/true

=== added directory 'plainbox/plainbox/impl/providers/stubbox/jobs'
=== added file 'plainbox/plainbox/impl/providers/stubbox/jobs/local.txt.in'
--- plainbox/plainbox/impl/providers/stubbox/jobs/local.txt.in	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/stubbox/jobs/local.txt.in	2013-09-13 17:12:45 +0000
@@ -0,0 +1,5 @@
+plugin: shell
+name: stub/local/true
+command: true
+_description:
+ Check success result from shell test case (generated from a local job)

=== added file 'plainbox/plainbox/impl/providers/stubbox/jobs/multilevel.txt.in'
--- plainbox/plainbox/impl/providers/stubbox/jobs/multilevel.txt.in	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/stubbox/jobs/multilevel.txt.in	2013-09-13 17:12:45 +0000
@@ -0,0 +1,20 @@
+plugin: local
+name: stub/multilevel
+_description:
+ Multilevel tests
+command:
+ cat <<'EOF'
+ plugin: shell
+ name: stub/multilevel_1
+ command: echo 1
+ description: This is just a sample multilevel test. Test 1.
+ EOF
+ echo ""
+ cat <<'EOF'
+ plugin: shell
+ name: stub/multilevel_2
+ command: echo 2
+ description: This is just a sample multilevel test. Test 2.
+ EOF
+ echo ""
+

=== added file 'plainbox/plainbox/impl/providers/stubbox/jobs/stub.txt.in'
--- plainbox/plainbox/impl/providers/stubbox/jobs/stub.txt.in	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/stubbox/jobs/stub.txt.in	2013-09-13 17:12:45 +0000
@@ -0,0 +1,80 @@
+plugin: shell
+name: stub/true
+command: true; echo oops
+_description:
+ Check success result from shell test case
+
+plugin: shell
+name: stub/false
+command: false
+_description:
+ Check failed result from shell test case
+
+plugin: shell
+name: stub/dependency/good
+depends: stub/true
+command: true
+_description:
+ Check job is executed when dependency succeeds
+
+plugin: shell
+name: stub/dependency/bad
+depends: stub/false
+command: true
+_description:
+ Check job result is set to uninitiated when dependency fails
+
+plugin: shell
+name: stub/sleep-60
+command: sleep 60
+_description: Sleep for sixty seconds
+
+plugin: shell
+depends: stub/multilevel
+name: stub/kill-ppid-if-KILLER-set
+command: if [ "$KILLER" == "yes" ]; then kill -9 $PPID; fi
+_description: Kill $PPID if $KILLER is set to yes
+
+plugin: resource
+name: stub_package
+command: stub_package_list
+
+plugin: shell
+name: stub/requirement/good
+requires: stub_package.name == "checkbox"
+command: true
+_description:
+ Check job is executed when requirements are met
+
+plugin: shell
+name: stub/requirement/bad
+requires: stub_package.name == "unknown-package"
+command: true
+_description:
+ Check job result is set to "not required on this system" when requirements are not met
+
+plugin: manual
+name: stub/manual
+command: echo Bonjour PlainBox
+_description:
+ PURPOSE:
+     This test checks that the manual plugin works fine
+ STEPS:
+     1. Add a comment
+     2. Set the result as passed
+ VERIFICATION:
+     Check that in the report the result is passed and the comment is displayed
+
+plugin: local
+name: __local__
+_description: stub local tests
+command:
+  shopt -s extglob
+  cat $CHECKBOX_SHARE/jobs/local.txt?(.in)
+
+plugin: local
+name: __multilevel__
+_description: stub multilevel tests
+command:
+  shopt -s extglob
+  cat $CHECKBOX_SHARE/jobs/multilevel.txt?(.in)

=== added directory 'plainbox/plainbox/impl/providers/stubbox/scripts'
=== added file 'plainbox/plainbox/impl/providers/stubbox/scripts/stub_package_list'
--- plainbox/plainbox/impl/providers/stubbox/scripts/stub_package_list	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/stubbox/scripts/stub_package_list	2013-09-13 17:12:45 +0000
@@ -0,0 +1,3 @@
+#!/bin/sh
+echo "name: checkbox"
+echo ""

=== added file 'plainbox/plainbox/impl/providers/test_checkbox.py'
--- plainbox/plainbox/impl/providers/test_checkbox.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/test_checkbox.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,40 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+plainbox.impl.providers.test_checkbox
+=====================================
+
+Test definitions for plainbox.impl.providers.checkbox module
+"""
+
+from plainbox.impl.providers.checkbox import CheckBoxAutoProvider
+from plainbox.testing_utils.testcases import TestCaseWithParameters
+
+
+class TestCheckBox(TestCaseWithParameters):
+    parameter_names = ('job',)
+
+    @classmethod
+    def get_parameter_values(cls):
+        for job in CheckBoxAutoProvider().get_builtin_jobs():
+            yield (job,)
+
+    def test_job_resource_expression(self):
+        self.parameters.job.get_resource_program()

=== added file 'plainbox/plainbox/impl/providers/test_special.py'
--- plainbox/plainbox/impl/providers/test_special.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/test_special.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,104 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+plainbox.impl.providers.test_special
+====================================
+
+Test definitions for plainbox.impl.providers.special module
+"""
+
+from unittest import TestCase
+
+from plainbox.impl.applogic import WhiteList
+from plainbox.impl.providers.special import IHVProvider
+from plainbox.impl.providers.v1 import DummyProvider1
+from plainbox.impl.providers.v1 import Provider1
+from plainbox.impl.testing_utils import make_job
+
+
+class TestIHVProvider(TestCase):
+
+    def setUp(self):
+        self.job_list = [make_job('foo'), make_job('bar')]
+        self.whitelist_list = [
+            WhiteList([], name='ihv-foo'), WhiteList([], name='other')]
+        self.real_provider = DummyProvider1(
+            job_list=self.job_list, whitelist_list=self.whitelist_list)
+        self.ihv_provider = IHVProvider(self.real_provider)
+
+    def test_default_settings(self):
+        provider = IHVProvider()
+        # It is either CheckBoxSrcProvider or CheckBoxDebProvider but it's not
+        # easy to test that IMHO. This just ensures we got something there.
+        self.assertIsInstance(provider._real, Provider1)
+
+    def test_name(self):
+        """
+        verify IHVProvider.name property
+        """
+        self.assertEqual(self.ihv_provider.name, "ihv")
+
+    def test_description(self):
+        """
+        verify IHVProvider.description property
+        """
+        self.assertEqual(self.ihv_provider.description, "IHV")
+
+    def test_get_builtin_jobs(self):
+        """
+        verify that IHVProvider.get_builtin_jobs() just returns all jobs
+        """
+        self.assertEqual(self.ihv_provider.get_builtin_jobs(), self.job_list)
+
+    def test_get_builtin_whitelists(self):
+        """
+        verify that IHVProvider.get_builtin_whitelists() returns only
+        whitelists for which name starts with 'ihv-'.
+        """
+        self.assertEqual(
+            self.ihv_provider.get_builtin_whitelists(),
+            [self.whitelist_list[0]])
+
+    def test_CHECKBOX_SHARE(self):
+        """
+        verify that IHVProvider.CHECKBOX_SHARE property just returns the
+        value from the real provider
+        """
+        self.assertEqual(
+            self.ihv_provider.CHECKBOX_SHARE,
+            self.real_provider.CHECKBOX_SHARE)
+
+    def test_extra_PYTHONPATH(self):
+        """
+        verify that IHVProvider.extra_PYTHONPATH property just returns the
+        value from the real provider
+        """
+        self.assertEqual(
+            self.ihv_provider.extra_PYTHONPATH,
+            self.real_provider.extra_PYTHONPATH)
+
+    def test_extra_PATH(self):
+        """
+        verify that IHVProvider.extra_PATH property just returns the
+        value from the real provider
+        """
+        self.assertEqual(
+            self.ihv_provider.extra_PATH,
+            self.real_provider.extra_PATH)

=== added file 'plainbox/plainbox/impl/providers/v1.py'
--- plainbox/plainbox/impl/providers/v1.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/providers/v1.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,229 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.providers.v1` -- Implementation of V1 provider
+==================================================================
+"""
+
+import logging
+import os
+import io
+
+from plainbox.abc import IProvider1, IProviderBackend1
+from plainbox.impl.applogic import WhiteList
+from plainbox.impl.job import JobDefinition
+from plainbox.impl.plugins import PlugInCollection
+from plainbox.impl.rfc822 import load_rfc822_records
+
+
+logger = logging.getLogger("plainbox.providers.v1")
+
+
+class Provider1(IProvider1, IProviderBackend1):
+    """
+    A v1 provider implementation.
+
+    This base class implements a checkbox-like provider object. Subclasses
+    need only supply the base directory (via __init__) that designates the
+    location for all other data.
+    """
+
+    def __init__(self, base_dir, name, description):
+        """
+        Initialize the provider with the associated base directory.
+
+        All of the typical v1 provider data is relative to this directory. It
+        can be customized by subclassing and overriding the particular methods
+        of the IProviderBackend1 class but that should not be necessary in
+        normal operation.
+        """
+        self._base_dir = base_dir
+        self._name = name
+        self._description = description
+
+    @property
+    def name(self):
+        """
+        name of this provider
+        """
+        return self._name
+
+    @property
+    def description(self):
+        """
+        description of this provider
+        """
+        return self._description
+
+    @property
+    def jobs_dir(self):
+        """
+        Return an absolute path of the jobs directory
+        """
+        return os.path.join(self._base_dir, "jobs")
+
+    @property
+    def scripts_dir(self):
+        """
+        Return an absolute path of the scripts directory
+
+        .. note::
+            The scripts may not work without setting PYTHONPATH and
+            CHECKBOX_SHARE.
+        """
+        return os.path.join(self._base_dir, "scripts")
+
+    @property
+    def whitelists_dir(self):
+        """
+        Return an absolute path of the whitelist directory
+        """
+        return os.path.join(self._base_dir, "data", "whitelists")
+
+    @property
+    def CHECKBOX_SHARE(self):
+        """
+        Return the required value of CHECKBOX_SHARE environment variable.
+
+        .. note::
+            This variable is only required by one script.
+            It would be nice to remove this later on.
+        """
+        return self._base_dir
+
+    @property
+    def extra_PYTHONPATH(self):
+        """
+        Return additional entry for PYTHONPATH, if needed.
+
+        This entry is required for CheckBox scripts to import the correct
+        CheckBox python libraries.
+
+        .. note::
+            The result may be None
+        """
+        return None
+
+    @property
+    def extra_PATH(self):
+        """
+        Return additional entry for PATH
+
+        This entry is required to lookup CheckBox scripts.
+        """
+        # NOTE: This is always the script directory. The actual logic for
+        # locating it is implemented in the property accessors.
+        return self.scripts_dir
+
+    def get_builtin_whitelists(self):
+        logger.debug("Loading built-in whitelists...")
+        whitelist_list = []
+        for name in os.listdir(self.whitelists_dir):
+            if name.endswith(".whitelist"):
+                whitelist_list.append(
+                    WhiteList.from_file(os.path.join(
+                        self.whitelists_dir, name)))
+        return sorted(whitelist_list, key=lambda whitelist: whitelist.name)
+
+    def get_builtin_jobs(self):
+        logger.debug("Loading built-in jobs...")
+        job_list = []
+        for name in os.listdir(self.jobs_dir):
+            if name.endswith(".txt") or name.endswith(".txt.in"):
+                job_list.extend(
+                    self.load_jobs(
+                        os.path.join(self.jobs_dir, name)))
+        return sorted(job_list, key=lambda job: job.name)
+
+    def load_jobs(self, somewhere):
+        """
+        Load job definitions from somewhere
+        """
+        if isinstance(somewhere, str):
+            # Load data from a file with the given name
+            filename = somewhere
+            with open(filename, 'rt', encoding='UTF-8') as stream:
+                return self.load_jobs(stream)
+        if isinstance(somewhere, io.TextIOWrapper):
+            stream = somewhere
+            logger.debug("Loading jobs definitions from %r...", stream.name)
+            record_list = load_rfc822_records(stream)
+            job_list = []
+            for record in record_list:
+                job = JobDefinition.from_rfc822_record(record)
+                job._provider = self
+                logger.debug("Loaded %r", job)
+                job_list.append(job)
+            return job_list
+        else:
+            raise TypeError(
+                "Unsupported type of 'somewhere': {!r}".format(
+                    type(somewhere)))
+
+
+class DummyProvider1(IProvider1, IProviderBackend1):
+    """
+    Dummy provider useful for creating isolated test cases
+    """
+
+    def __init__(self, job_list=None, whitelist_list=None, **extras):
+        self._job_list = job_list or []
+        self._whitelist_list = whitelist_list or []
+        self._extras = extras
+        self._patch_provider_field()
+
+    def _patch_provider_field(self):
+        # NOTE: each v1 job needs a _provider attribute that points to the
+        # provider. Since many tests use make_job() which does not set it for
+        # obvious reasons it needs to be patched-in.
+        for job in self._job_list:
+            if job._provider is None:
+                job._provider = self
+
+    @property
+    def name(self):
+        return self._extras.get('name', "dummy")
+
+    @property
+    def description(self):
+        return self._extras.get(
+            'description', "A dummy provider useful for testing")
+
+    @property
+    def CHECKBOX_SHARE(self):
+        return self._extras.get('CHECKBOX_SHARE', "")
+
+    @property
+    def extra_PYTHONPATH(self):
+        return self._extras.get("PYTHONPATH")
+
+    @property
+    def extra_PATH(self):
+        return self._extras.get("PATH", "")
+
+    def get_builtin_whitelists(self):
+        return self._whitelist_list
+
+    def get_builtin_jobs(self):
+        return self._job_list
+
+
+# Collection of all providers
+all_providers = PlugInCollection('plainbox.provider.v1')

=== modified file 'plainbox/plainbox/impl/result.py'
--- plainbox/plainbox/impl/result.py	2013-02-25 11:02:59 +0000
+++ plainbox/plainbox/impl/result.py	2013-09-13 17:12:45 +0000
@@ -21,18 +21,20 @@
 :mod:`plainbox.impl.result` -- job result
 =========================================
 
-.. warning::
-
-    THIS MODULE DOES NOT HAVE STABLE PUBLIC API
+This module has two basic implementations of :class:`IJobResult`:
+:class:`MemoryJobResult` and :class:`DiskJobResult`.
 """
 
 from collections import namedtuple
 import base64
+import gzip
+import io
 import json
 import logging
-import os
+import inspect
 
 from plainbox.abc import IJobResult
+from plainbox.impl.signal import Signal
 
 logger = logging.getLogger("plainbox.result")
 
@@ -50,117 +52,220 @@
 IOLogRecord = namedtuple("IOLogRecord", "delay stream_name data".split())
 
 
-class JobResult(IJobResult):
-    """
-    Result of running a JobDefinition.
-    """
-
-    # The outcome of a job is a one-word classification how how it ran.  There
-    # are several values that were not used in the original implementation but
-    # their existence helps to organize and implement plainbox. They are
-    # discussed below to make their intended meaning more detailed than is
-    # possible from the variable name alone.
-    #
-    # The None outcome - a job that basically did not run at all.
-    OUTCOME_NONE = None
-    # The pass and fail outcomes are the two most essential, and externally
-    # visible, job outcomes. They can be provided by either automated or manual
-    # "classifier" - a script or a person that clicks a "pass" or "fail"
-    # button.
-    OUTCOME_PASS = 'pass'
-    OUTCOME_FAIL = 'fail'
-    # The skip outcome is used when the operator selected a job but then
-    # skipped it. This is typically used for a manual job that is tedious or
-    # was selected by accident.
-    OUTCOME_SKIP = 'skip'
-    # The not supported outcome is used when a job was about to run but a
-    # dependency or resource requirement prevent it from running.  XXX: perhaps
-    # this should be called "not available", not supported has the "unsupported
-    # code" feeling associated with it.
-    OUTCOME_NOT_SUPPORTED = 'not-supported'
-    # A temporary state that should be removed later on, used to indicate that
-    # job runner is not implemented but the job "ran" so to speak.
-    OUTCOME_NOT_IMPLEMENTED = 'not-implemented'
-
-    # XXX: how to support attachments?
+class _JobResultBase(IJobResult):
+    """
+    Base class for :class:`IJobResult` implementations.
+
+    This class defines base properties common to all variants of `IJobResult`
+    """
 
     def __init__(self, data):
         """
         Initialize a new result with the specified data
+
+        Data is a dictionary that can hold arbitrary values. At least some
+        values are explicitly used, such as 'outcome', 'comments' and
+        'return_code' but all of those are optional.
         """
-        # XXX: consider moving job to a dedicated field as we want to serialize
-        # results without putting the job reference in there (a job name would
-        # be a fine substitute). It would also make the 'job is required'
-        # requirement spelled out below explicit)
-        #
-        # TODO: Do some basic validation, at least 'job' must be set.
         self._data = data
 
     def __str__(self):
-        return "{}: {}".format(
-            self.job.name, self.outcome)
+        return str(self.outcome)
 
     def __repr__(self):
-        return "<{} job:{!r} outcome:{!r}>".format(
-            self.__class__.__name__, self.job, self.outcome)
+        return "<{} outcome:{!r}>".format(
+            self.__class__.__name__, self.outcome)
 
-    @property
-    def job(self):
-        return self._data['job']
+    @Signal.define
+    def on_outcome_changed(self, old, new):
+        """
+        Signal sent when ``outcome`` property value is changed
+        """
 
     @property
     def outcome(self):
+        """
+        outcome of running this job.
+
+        The outcome ultimately classifies jobs (tests) as failures or
+        successes.  There are several other types of outcome that all basically
+        mean that the job did not run for some particular reason.
+        """
         return self._data.get('outcome', self.OUTCOME_NONE)
 
+    @outcome.setter
+    def outcome(self, new):
+        old = self.outcome
+        if old != new:
+            self._data['outcome'] = new
+            self.on_outcome_changed(old, new)
+
+    @property
+    def execution_duration(self):
+        """
+        The amount of time in seconds it took to run this
+        job's command.
+        """
+        return self._data.get('execution_duration')
+
     @property
     def comments(self):
+        """
+        comments of the test operator
+        """
         return self._data.get('comments')
 
-    @property
-    def io_log(self):
-        if os.path.exists(self._data.get('io_log', '')):
-            with open(self._data.get('io_log')) as f:
-                return json.load(f, cls=IoLogDecoder)
-        else:
-            return ()
+    @comments.setter
+    def comments(self, new):
+        old = self.comments
+        if old != new:
+            self._data['comments'] = new
+            self.on_comments_changed(old, new)
+
+    @Signal.define
+    def on_comments_changed(self, old, new):
+        """
+        Signal sent when ``comments`` property value is changed
+        """
 
     @property
     def return_code(self):
+        """
+        return code of the command associated with the job, if any
+        """
         return self._data.get('return_code')
 
-    def _get_persistance_subset(self):
-        state = {}
-        state['data'] = {}
-        for key, value in self._data.items():
-            state['data'][key] = value
-        return state
-
-    @classmethod
-    def from_json_record(cls, record):
-        """
-        Create a JobResult instance from JSON record
-        """
-        return cls(record['data'])
-
-
-class IoLogEncoder(json.JSONEncoder):
-    """
-    JSON Serialize helper to encode binary io logs
-    """
-
-    def default(self, obj):
-        return base64.standard_b64encode(obj).decode('ASCII')
-
-
-class IoLogDecoder(json.JSONDecoder):
-    """
-    JSON Decoder helper for io logs objects
-    """
-
-    def decode(self, obj):
-        return tuple([IOLogRecord(
-            # io logs namedtuple are recorded as list in json, using _asdict()
-            # would require too much space for little benefit.
-            # IOLogRecord are re created using the list ordering
-            log[0], log[1], base64.standard_b64decode(log[2].encode('ASCII')))
-            for log in super().decode(obj)])
+    @property
+    def io_log(self):
+        return tuple(self.get_io_log())
+
+
+class MemoryJobResult(_JobResultBase):
+    """
+    A :class:`IJobResult` that keeps IO logs in memory.
+
+    This type of JobResult is indented for writing unit tests where the hassle
+    of going through the filesystem would make them needlessly complicated.
+    """
+
+    def get_io_log(self):
+        io_log_data = self._data.get('io_log', ())
+        for entry in io_log_data:
+            if isinstance(entry, IOLogRecord):
+                yield entry
+            elif isinstance(entry, tuple):
+                yield IOLogRecord(*entry)
+            else:
+                raise TypeError(
+                    "each item in io_log must be either a tuple"
+                    " or special the IOLogRecord tuple")
+
+
+class GzipFile(gzip.GzipFile):
+    """
+    Subclass of GzipFile that works around missing read1() on python3.2
+
+    See: http://bugs.python.org/issue10791
+    """
+
+    def read1(self, n):
+        return self.read(n)
+
+
+class DiskJobResult(_JobResultBase):
+    """
+    A :class:`IJobResult` that keeps IO logs on disk.
+
+    This type of JobResult is intended for working with most results. It does
+    not store IO logs in memory so it is scalable to arbitrary IO log sizes.
+    Each instance just knows where the log file is located (using the
+    'io_log_filename' attribute for that) and offers streaming API for
+    accessing particular parts of the log.
+    """
+
+    @property
+    def io_log_filename(self):
+        """
+        pathname of the file containing serialized IO log records
+        """
+        return self._data.get("io_log_filename")
+
+    def get_io_log(self):
+        record_path = self.io_log_filename
+        if record_path:
+            with GzipFile(record_path, mode='rb') as gzip_stream, \
+                    io.TextIOWrapper(gzip_stream, encoding='UTF-8') as stream:
+                for record in IOLogRecordReader(stream):
+                    yield record
+
+    @property
+    def io_log(self):
+        caller_frame, filename, lineno = inspect.stack(0)[1][:3]
+        logger.warning(
+            "Expensive DiskJobResult.io_log property access from %s:%d",
+            filename, lineno)
+        return super(DiskJobResult, self).io_log
+
+
+class IOLogRecordWriter:
+    """
+    Class for writing :class:`IOLogRecord` instances to a text stream
+    """
+
+    def __init__(self, stream):
+        self.stream = stream
+
+    def close(self):
+        self.stream.close()
+
+    def write_record(self, record):
+        """
+        Write an :class:`IOLogRecord` to the stream.
+        """
+        text = json.dumps([
+            record[0], record[1],
+            base64.standard_b64encode(record[2]).decode("ASCII")],
+            check_circular=False, ensure_ascii=True, indent=None,
+            separators=(',', ':'))
+        logger.debug("Encoded %r into string %r", record, text)
+        assert "\n" not in text
+        self.stream.write(text)
+        self.stream.write('\n')
+
+
+class IOLogRecordReader:
+    """
+    Class for streaming :class:`IOLogRecord` instances from a text stream
+    """
+
+    def __init__(self, stream):
+        self.stream = stream
+
+    def close(self):
+        self.stream.close()
+
+    def read_record(self):
+        """
+        Read the next record from the stream.
+
+        :returns: None if the stream is empty
+        :returns: next :class:`IOLogRecord` as found in the stream.
+        """
+        text = self.stream.readline()
+        if len(text) == 0:
+            return
+        data = json.loads(text)
+        return IOLogRecord(
+            data[0], data[1],
+            base64.standard_b64decode(data[2].encode("ASCII")))
+
+    def __iter__(self):
+        """
+        Iterate over the entire stream generating subsequent
+        :class:`IOLogRecord` entries.
+        """
+        while True:
+            record = self.read_record()
+            if record is None:
+                break
+            yield record

=== modified file 'plainbox/plainbox/impl/rfc822.py'
--- plainbox/plainbox/impl/rfc822.py	2013-05-08 07:35:19 +0000
+++ plainbox/plainbox/impl/rfc822.py	2013-09-13 17:12:45 +0000
@@ -30,6 +30,7 @@
 
 import logging
 
+from functools import total_ordering
 from inspect import cleandoc
 
 from plainbox.impl.secure.checkbox_trusted_launcher import RFC822SyntaxError
@@ -37,7 +38,7 @@
 
 logger = logging.getLogger("plainbox.rfc822")
 
-
+@total_ordering
 class Origin:
     """
     Simple class for tracking where something came from
@@ -64,6 +65,14 @@
         return "{}:{}-{}".format(
             self.filename, self.line_start, self.line_end)
 
+    def __eq__(self, other):
+        return (self.filename, self.line_start, self.line_end) == \
+               (other.filename, other.line_start, other.line_end)
+
+    def __gt__(self, other):
+        return (self.filename, self.line_start, self.line_end) > \
+               (other.filename, other.line_start, other.line_end)
+
 
 class RFC822Record(BaseRFC822Record):
     """
@@ -148,7 +157,8 @@
             filename = stream.name
         except AttributeError:
             filename = None
-        origin = Origin(filename, None, None)
+        # Always bind 'origin' (None for anonymous streams) so the
+        # RFC822Record construction below cannot raise NameError
+        origin = Origin(filename, None, None) if filename else None
         data = data_cls()
         record = RFC822Record(data, origin)
 
@@ -166,14 +176,15 @@
         """
         Remember the line number of the record start unless already set
         """
-        if record.origin.line_start is None:
+        if origin and record.origin.line_start is None:
             record.origin.line_start = lineno
 
     def _update_end_lineno():
         """
         Update the line number of the record tail
         """
-        record.origin.line_end = lineno
+        if origin:
+            record.origin.line_end = lineno
 
     # Start with an empty record
     _new_record()

=== modified file 'plainbox/plainbox/impl/runner.py'
--- plainbox/plainbox/impl/runner.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/runner.py	2013-09-13 17:12:45 +0000
@@ -28,15 +28,22 @@
 
 import collections
 import datetime
-import json
+import gzip
+import io
 import logging
 import os
 import string
+import time
 
 from plainbox.vendor import extcmd
 
-from plainbox.abc import IJobRunner
-from plainbox.impl.result import JobResult, IOLogRecord, IoLogEncoder
+from plainbox.abc import IJobRunner, IJobResult
+from plainbox.impl.providers.checkbox import CheckBoxSrcProvider
+from plainbox.impl.result import DiskJobResult
+from plainbox.impl.result import IOLogRecord
+from plainbox.impl.result import IOLogRecordWriter
+from plainbox.impl.result import MemoryJobResult
+from plainbox.impl.signal import Signal
 
 logger = logging.getLogger("plainbox.runner")
 
@@ -51,15 +58,6 @@
     return ''.join(c if c in valid_chars else '_' for c in _string)
 
 
-def io_log_write(log, stream):
-    """
-    JSON call to serialize io_log objects to disk
-    """
-    json.dump(
-        log, stream, ensure_ascii=False, indent=None, cls=IoLogEncoder,
-        separators=(',', ':'))
-
-
 def authenticate_warmup():
     """
     Call the checkbox trusted launcher in warmup mode.
@@ -72,13 +70,9 @@
         ['pkexec', 'checkbox-trusted-launcher', '--warmup'])
 
 
-class CommandIOLogBuilder(extcmd.DelegateBase):
+class IOLogRecordGenerator(extcmd.DelegateBase):
     """
-    Delegate for extcmd that builds io_log entries.
-
-    IO log entries are records kept by JobResult.io_log and correspond to all
-    of the data that was written by called process. The format is a sequence of
-    tuples (delay, stream_name, data).
+    Delegate for extcmd that generates io_log entries.
     """
 
     def on_begin(self, args, kwargs):
@@ -86,27 +80,33 @@
         Internal method of extcmd.DelegateBase
 
         Called when a command is being invoked.
-        Begins tracking time (relative time entries) and creates the empty
-        io_log list.
+
+        Begins tracking time (relative time entries)
         """
-        logger.debug("io log starting for command: %r", args)
-        self.io_log = []
         self.last_msg = datetime.datetime.utcnow()
 
     def on_line(self, stream_name, line):
         """
-        Internal method of IOLogBuilder
+        Internal method of extcmd.DelegateBase
 
-        Appends each line to the io_log. Maintains a timestamp of the last
-        message so that approximate delay between each piece of output can be
-        recorded as well.
+        Creates a new IOLogRecord and passes it to :meth:`on_new_record()`.
+        Maintains a timestamp of the last message so that approximate delay
+        between each piece of output can be recorded as well.
         """
         now = datetime.datetime.utcnow()
         delay = now - self.last_msg
         self.last_msg = now
         record = IOLogRecord(delay.total_seconds(), stream_name, line)
-        self.io_log.append(record)
-        logger.debug("io log captured %r", record)
+        self.on_new_record(record)
+
+    @Signal.define
+    def on_new_record(self, record):
+        """
+        Internal signal method of :class:`IOLogRecordGenerator`
+
+        Called when a new record is generated and needs to be processed.
+        """
+        logger.debug("io log generated %r", record)
 
 
 class CommandOutputWriter(extcmd.DelegateBase):
@@ -198,7 +198,7 @@
     _DRY_RUN_PLUGINS = ('local', 'resource', 'attachment')
 
     def __init__(self, session_dir, jobs_io_log_dir,
-                 command_io_delegate=None, outcome_callback=None,
+                 command_io_delegate=None, interaction_callback=None,
                  dry_run=False):
         """
         Initialize a new job runner.
@@ -211,7 +211,7 @@
         self._session_dir = session_dir
         self._jobs_io_log_dir = jobs_io_log_dir
         self._command_io_delegate = command_io_delegate
-        self._outcome_callback = outcome_callback
+        self._interaction_callback = interaction_callback
         self._dry_run = dry_run
 
     def run_job(self, job, config=None):
@@ -223,9 +223,8 @@
         try:
             runner = getattr(self, func_name)
         except AttributeError:
-            return JobResult({
-                'job': job,
-                'outcome': JobResult.OUTCOME_NOT_IMPLEMENTED,
+            return MemoryJobResult({
+                'outcome': IJobResult.OUTCOME_NOT_IMPLEMENTED,
+                'comments': 'This plugin is not supported'
             })
         else:
@@ -238,9 +237,8 @@
         """
         Produce the result that is used when running in dry-run mode
         """
-        return JobResult({
-            'job': job,
-            'outcome': JobResult.OUTCOME_SKIP,
+        return MemoryJobResult({
+            'outcome': IJobResult.OUTCOME_SKIP,
             'comments': "Job skipped in dry-run mode"
         })
 
@@ -256,35 +254,32 @@
         return self._just_run_command(job, config)
 
     def _plugin_manual(self, job, config):
-        if self._outcome_callback is None:
-            return JobResult({
-                'job': job,
-                'outcome': JobResult.OUTCOME_SKIP,
-                'comment': "non-interactive test run"
-            })
+        # Get the outcome from the callback, if available,
+        # or fall back to the special OUTCOME_UNDECIDED value.
+        if self._interaction_callback is not None:
+            return self._interaction_callback(self, job, config)
         else:
-            result = self._just_run_command(job, config)
-            # XXX: make outcome writable
-            result._data['outcome'] = self._outcome_callback()
-            return result
+            return DiskJobResult({'outcome': IJobResult.OUTCOME_UNDECIDED})
 
     _plugin_user_interact = _plugin_manual
     _plugin_user_verify = _plugin_manual
 
     def _just_run_command(self, job, config):
         # Run the embedded command
-        return_code, io_log = self._run_command(job, config)
+        start_time = time.time()
+        return_code, record_path = self._run_command(job, config)
+        execution_duration = time.time() - start_time
         # Convert the return of the command to the outcome of the job
         if return_code == 0:
-            outcome = JobResult.OUTCOME_PASS
+            outcome = IJobResult.OUTCOME_PASS
         else:
-            outcome = JobResult.OUTCOME_FAIL
+            outcome = IJobResult.OUTCOME_FAIL
         # Create a result object and return it
-        return JobResult({
-            'job': job,
+        return DiskJobResult({
             'outcome': outcome,
             'return_code': return_code,
-            'io_log': io_log
+            'io_log_filename': record_path,
+            'execution_duration': execution_duration
         })
 
     def _get_script_env(self, job, config=None, only_changes=False):
@@ -296,7 +291,9 @@
         # Use non-internationalized environment
         env['LANG'] = 'C.UTF-8'
         # Allow the job to customize anything
-        job.modify_execution_environment(env, self._session_dir, config)
+        job.modify_execution_environment(env, self._session_dir,
+                                         self._checkbox_data_dir,
+                                         config)
         # If a differential environment is requested return only the subset
         # that has been altered.
         #
@@ -341,81 +338,108 @@
         cmd += ['bash', '-c', job.command]
         return cmd
 
-    def _run_command(self, job, config):
-        """
-        Run the shell command associated with the specified job.
-
-        Returns a tuple (return_code, io_log)
-        """
-        # Bail early if there is nothing do do
-        if job.command is None:
-            return None, ()
+    def _prepare_io_handling(self, job, config):
         ui_io_delegate = self._command_io_delegate
         # If there is no UI delegate specified create a simple
         # delegate that logs all output to the console
         if ui_io_delegate is None:
             ui_io_delegate = FallbackCommandOutputPrinter(job.name)
+        # Compute a shared base filename for all logging activity associated
+        # with this job (aka: the slug)
+        slug = slugify(job.name)
         # Create a delegate that writes all IO to disk
-        slug = slugify(job.name)
         output_writer = CommandOutputWriter(
-            stdout_path=os.path.join(self._jobs_io_log_dir,
-                                     "{}.stdout".format(slug)),
-            stderr_path=os.path.join(self._jobs_io_log_dir,
-                                     "{}.stderr".format(slug)))
-        # Create a delegate that builds a log of all IO
-        io_log_builder = CommandIOLogBuilder()
+            stdout_path=os.path.join(
+                self._jobs_io_log_dir, "{}.stdout".format(slug)),
+            stderr_path=os.path.join(
+                self._jobs_io_log_dir, "{}.stderr".format(slug)))
+        # Create a delegate for converting regular IO to IOLogRecords.
+        # It takes no arguments as all the interesting stuff is added as a
+        # signal listener.
+        io_log_gen = IOLogRecordGenerator()
         # Create the delegate for routing IO
         #
-        #
         # Split the stream of data into three parts (each part is expressed as
         # an element of extcmd.Chain()).
         #
         # Send the first copy of the data through bytes->text decoder and
         # then to the UI delegate. This cold be something provided by the
-        # higher level caller or the default CommandOutputLogger.
+        # higher level caller or the default FallbackCommandOutputPrinter.
         #
-        # Send the second copy of the data to the _IOLogBuilder() instance that
-        # just concatenates subsequent bytes into neat time-stamped records.
+        # Send the second copy of the data to the IOLogRecordGenerator instance
+        # that converts raw bytes into neat IOLogRecord objects. This generator
+        # has a on_new_record signal that can be used to do stuff when a new
+        # record is generated.
         #
         # Send the third copy to the output writer that writes everything to
         # disk.
-        delegate = extcmd.Chain([
-            ui_io_delegate,
-            io_log_builder,
-            output_writer])
+        delegate = extcmd.Chain([ui_io_delegate, io_log_gen, output_writer])
         logger.debug("job[%s] extcmd delegate: %r", job.name, delegate)
+        # Attach listeners to io_log_gen (the IOLogRecordGenerator instance)
+        # One listener appends each record to an array
+        return delegate, io_log_gen
+
+    def _run_command(self, job, config):
+        """
+        Run the shell command associated with the specified job.
+
+        :returns: (return_code, record_path) where return_code is the number
+        returned by the exiting child process while record_path is a pathname
+        of a gzipped content readable with :class:`IOLogRecordReader`
+        """
+        # Bail early if there is nothing to do
+        if job.command is None:
+            return None, ()
+        # Create an equivalent of the CHECKBOX_DATA directory used by
+        # some jobs to store logs and other files that may later be used
+        # by other jobs.
+        self._checkbox_data_dir = os.path.join(
+            self._session_dir, "CHECKBOX_DATA")
+        if not os.path.isdir(self._checkbox_data_dir):
+            os.makedirs(self._checkbox_data_dir)
+        # Get an extcmd delegate for observing all the IO the way we need
+        delegate, io_log_gen = self._prepare_io_handling(job, config)
         # Create a subprocess.Popen() like object that uses the delegate
         # system to observe all IO as it occurs in real time.
-        logging_popen = extcmd.ExternalCommandWithDelegate(delegate)
-        # Start the process and wait for it to finish getting the
-        # result code. This will actually call a number of callbacks
-        # while the process is running. It will also spawn a few
-        # threads although all callbacks will be fired from a single
-        # thread (which is _not_ the main thread)
-        logger.debug("job[%s] starting command: %s", job.name, job.command)
+        extcmd_popen = extcmd.ExternalCommandWithDelegate(delegate)
+        # Stream all IOLogRecord entries to disk
+        record_path = os.path.join(
+            self._jobs_io_log_dir, "{}.record.gz".format(
+                slugify(job.name)))
+        with gzip.open(record_path, mode='wb') as gzip_stream, \
+                io.TextIOWrapper(
+                    gzip_stream, encoding='UTF-8') as record_stream:
+            writer = IOLogRecordWriter(record_stream)
+            io_log_gen.on_new_record.connect(writer.write_record)
+            # Start the process and wait for it to finish getting the
+            # result code. This will actually call a number of callbacks
+            # while the process is running. It will also spawn a few
+            # threads although all callbacks will be fired from a single
+            # thread (which is _not_ the main thread)
+            logger.debug("job[%s] starting command: %s", job.name, job.command)
+            # Run the job command using extcmd
+            return_code = self._run_extcmd(job, config, extcmd_popen)
+            logger.debug(
+                "job[%s] command return code: %r", job.name, return_code)
+        return return_code, record_path
+
+    def _run_extcmd(self, job, config, extcmd_popen):
+        # If we need to switch user use pkexec for that
+        # otherwise just run the command directly.
         if job.user is not None:
-            if job._checkbox._mode == 'src':
+            # Use regular pkexec in src mode (when the provider is in the
+            # source tree), or basically when working from trunk. Use the
+            # trusted launcher otherwise (to get all the pkexec policy applied)
+            if isinstance(job._provider, CheckBoxSrcProvider):
                 cmd = self._get_command_src(job, config)
             else:
                 cmd = self._get_command_trusted(job, config)
             cmd = ['pkexec', '--user', job.user] + cmd
-            logger.debug("job[%s] executing %r", job.name, cmd)
-            return_code = logging_popen.call(cmd)
+            env = None
         else:
             # XXX: sadly using /bin/sh results in broken output
             # XXX: maybe run it both ways and raise exceptions on differences?
             cmd = ['bash', '-c', job.command]
-            logger.debug("job[%s] executing %r", job.name, cmd)
-            return_code = logging_popen.call(
-                cmd, env=self._get_script_env(job, config))
-        logger.debug("job[%s] command return code: %r",
-                     job.name, return_code)
-        # XXX: Perhaps handle process dying from signals here
-        # When the process is killed proc.returncode is not set
-        # and another (cannot remember now) attribute is set
-        fjson = os.path.join(self._jobs_io_log_dir, "{}.json".format(slug))
-        with open(fjson, "wt") as stream:
-            io_log_write(io_log_builder.io_log, stream)
-            stream.flush()
-            os.fsync(stream.fileno())
-        return return_code, fjson
+            env = self._get_script_env(job, config)
+        logger.debug("job[%s] executing %r with env %r", job.name, cmd, env)
+        return extcmd_popen.call(cmd, env=env)

=== modified file 'plainbox/plainbox/impl/secure/checkbox_trusted_launcher.py'
--- plainbox/plainbox/impl/secure/checkbox_trusted_launcher.py	2013-06-22 06:06:21 +0000
+++ plainbox/plainbox/impl/secure/checkbox_trusted_launcher.py	2013-09-13 17:12:45 +0000
@@ -42,56 +42,69 @@
     Base Job definition class.
     """
 
+    def __init__(self, data):
+        self.__data = data
+        self._checksum = None
+
+    @property
+    def _data(self):
+        raise AttributeError("Hey, poking at _data is forbidden!")
+
+    def get_record_value(self, name, default=None):
+        """
+        Obtain the value of the specified record attribute
+        """
+        return self.__data.get(name, default)
+
     @property
     def plugin(self):
-        return self.__getattr__('plugin')
+        return self.get_record_value('plugin')
 
     @property
     def command(self):
-        try:
-            return self.__getattr__('command')
-        except AttributeError:
-            return None
+        return self.get_record_value('command')
 
     @property
     def environ(self):
-        try:
-            return self.__getattr__('environ')
-        except AttributeError:
-            return None
+        return self.get_record_value('environ')
 
     @property
     def user(self):
-        try:
-            return self.__getattr__('user')
-        except AttributeError:
-            return None
-
-    def __init__(self, data):
-        self._data = data
-
-    def __getattr__(self, attr):
-        if attr in self._data:
-            return self._data[attr]
-        raise AttributeError(attr)
+        return self.get_record_value('user')
 
     def get_checksum(self):
         """
         Compute a checksum of the job definition.
 
-        This method can be used to compute the checksum of the canonical form
-        of the job definition.  The canonical form is the UTF-8 encoded JSON
+        """
+        return self.checksum
+
+    @property
+    def checksum(self):
+        """
+        Checksum of the job definition.
+
+        This property can be used to compute the checksum of the canonical form
+        of the job definition. The canonical form is the UTF-8 encoded JSON
         serialization of the data that makes up the full definition of the job
         (all keys and values). The JSON serialization uses no indent and
         minimal separators.
 
         The checksum is defined as the SHA256 hash of the canonical form.
         """
+        if self._checksum is None:
+            self._checksum = self._compute_checksum()
+        return self._checksum
+
+    def _compute_checksum(self):
+        """
+        Compute the value for :meth:`get_checksum()` and :attr:`checksum`.
+        """
         # Ideally we'd use simplejson.dumps() with sorted keys to get
         # predictable serialization but that's another dependency. To get
         # something simple that is equally reliable, just sort all the keys
         # manually and ask standard json to serialize that..
-        sorted_data = collections.OrderedDict(sorted(self._data.items()))
+        sorted_data = collections.OrderedDict(sorted(self.__data.items()))
         # Compute the canonical form which is arbitrarily defined as sorted
         # json text with default indent and separator settings.
         canonical_form = json.dumps(

=== added file 'plainbox/plainbox/impl/service.py'
--- plainbox/plainbox/impl/service.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/service.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,1058 @@
+# This file is part of Checkbox.
+#
+# Copyright 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.service` -- DBus service for PlainBox
+=========================================================
+"""
+
+from threading import Thread
+import collections
+import functools
+import itertools
+import logging
+import random
+
+try:
+    from inspect import Signature
+except ImportError:
+    try:
+        from plainbox.vendor.funcsigs import Signature
+    except ImportError:
+        raise SystemExit("DBus parts require 'funcsigs' from pypi.")
+from plainbox.vendor import extcmd
+
+from plainbox.impl import dbus
+from plainbox.impl.dbus import OBJECT_MANAGER_IFACE
+from plainbox.impl.result import DiskJobResult
+from plainbox.impl.runner import JobRunner
+from plainbox.impl.signal import Signal
+
+
# Module-level logger for the DBus service layer.
logger = logging.getLogger("plainbox.service")

# Common prefix shared by every DBus interface name exported below.
_BASE_IFACE = "com.canonical.certification."

SERVICE_IFACE = _BASE_IFACE + "PlainBox.Service1"
SESSION_IFACE = _BASE_IFACE + "PlainBox.Session1"
PROVIDER_IFACE = _BASE_IFACE + "PlainBox.Provider1"
JOB_IFACE = _BASE_IFACE + "PlainBox.JobDefinition1"
JOB_RESULT_IFACE = _BASE_IFACE + "PlainBox.Result1"
JOB_STATE_IFACE = _BASE_IFACE + "PlainBox.JobState1"
WHITELIST_IFACE = _BASE_IFACE + "PlainBox.WhiteList1"
CHECKBOX_JOB_IFACE = _BASE_IFACE + "CheckBox.JobDefinition1"
RUNNING_JOB_IFACE = _BASE_IFACE + "PlainBox.RunningJob1"
+
+
class PlainBoxObjectWrapper(dbus.service.ObjectWrapper):
    """
    Wrapper for exporting PlainBox object over DBus.

    Allows to keep the python object logic separate from the DBus counterpart.
    Has a set of utility methods to publish the object and any children objects
    to DBus.
    """

    # Use a different logger for the translate decorator.
    # This is just so that we don't spam people that want to peek
    # at the service module.
    _logger = logging.getLogger("plainbox.dbus.service.translate")

    def __init__(self,
                 native,
                 conn=None, object_path=None, bus_name=None,
                 **kwargs):
        # The base class stores the wrapped object as self.native; any extra
        # keyword arguments are forwarded to __shared_initialize__() for
        # subclass-specific setup.
        super(PlainBoxObjectWrapper, self).__init__(
            native, conn, object_path, bus_name)
        logger.debug("Created DBus wrapper for: %r", self.native)
        self.__shared_initialize__(**kwargs)

    def __shared_initialize__(self, **kwargs):
        """
        Optional initialize method that can use any unused keyword arguments
        that were originally passed to __init__(). This makes it far easier to
        subclass as __init__() is rather complicated.

        Inspired by STANDARD GENERIC FUNCTION SHARED-INITIALIZE
        See hyperspec page: http://clhs.lisp.se/Body/f_shared.htm
        """

    def _get_preferred_object_path(self):
        """
        Return the preferred object path of this object on DBus
        """
        # id() makes the path unique for the lifetime of the native object.
        return "/plainbox/{}/{}".format(
            self.native.__class__.__name__, id(self.native))

    def publish_self(self, connection):
        """
        Publish this object to the connection
        """
        object_path = self._get_preferred_object_path()
        self.add_to_connection(connection, object_path)
        logger.debug("Published DBus wrapper for %r as %s",
                     self.native, object_path)

    def publish_related_objects(self, connection):
        """
        Publish this and any other objects to the connection

        Do not send ObjectManager events, just register any additional objects
        on the bus. By default only the object itself is published but
        collection managers are expected to publish all of the children here.
        """
        self.publish_self(connection)

    def publish_managed_objects(self):
        """
        This method is specific to ObjectManager, it basically adds children
        and sends the right events. This is a separate stage so that the whole
        hierarchy can first put all of the objects on the bus and then tell the
        world about it in one big signal message.
        """

    @classmethod
    def translate(cls, func):
        """
        Decorator for Wrapper methods.

        The decorated method does not need to manually lookup objects when the
        caller (across DBus) passes an object path. Type information is
        provided using parameter annotations.

        The annotation accepts DBus type expressions (but in practice it is
        very limited). For the moment it cannot infer the argument types from
        the decorator for dbus.service.method.
        """
        sig = Signature.from_function(func)

        # Each translate_* helper converts one annotated value kind:
        # incoming object paths become native objects, outgoing native
        # objects become their registered wrappers.

        def translate_o(object_path):
            # 'o' argument: a single object path -> the native object.
            try:
                obj = cls.find_object_by_path(object_path)
            except KeyError as exc:
                raise dbus.exceptions.DBusException((
                    "object path {} does not designate an existing"
                    " object").format(exc))
            else:
                return obj.native

        def translate_ao(object_path_list):
            # 'ao' argument: array of object paths -> list of native objects.
            try:
                obj_list = [cls.find_object_by_path(object_path)
                            for object_path in object_path_list]
            except KeyError as exc:
                raise dbus.exceptions.DBusException((
                    "object path {} does not designate an existing"
                    " object").format(exc))
            else:
                return [obj.native for obj in obj_list]

        def translate_return_o(obj):
            # 'o' return: native object -> its wrapper.
            try:
                return cls.find_wrapper_by_native(obj)
            except KeyError:
                raise dbus.exceptions.DBusException(
                    "(o) internal error, unable to lookup object wrapper")

        def translate_return_ao(object_list):
            # 'ao' return: list of native objects -> array of wrappers.
            try:
                return dbus.types.Array([
                    cls.find_wrapper_by_native(obj)
                    for obj in object_list
                ], signature='o')
            except KeyError:
                raise dbus.exceptions.DBusException(
                    "(ao) internal error, unable to lookup object wrapper")

        def translate_return_a_brace_so_brace(mapping):
            # 'a{so}' return: dict of string -> native object.
            try:
                return dbus.types.Dictionary({
                    key: cls.find_wrapper_by_native(value)
                    for key, value in mapping.items()
                }, signature='so')
            except KeyError:
                raise dbus.exceptions.DBusException(
                    "(a{so}) internal error, unable to lookup object wrapper")

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Bind the actual call arguments to the signature so that each
            # parameter can be looked up and replaced by name.
            bound = sig.bind(*args, **kwargs)
            cls._logger.debug(
                "wrapped %s called with %s", func, bound.arguments)
            for param in sig.parameters.values():
                if param.annotation is Signature.empty:
                    pass
                elif param.annotation == 'o':
                    object_path = bound.arguments[param.name]
                    bound.arguments[param.name] = translate_o(object_path)
                elif param.annotation == 'ao':
                    object_path_list = bound.arguments[param.name]
                    bound.arguments[param.name] = translate_ao(
                        object_path_list)
                elif param.annotation in ('s', 'as'):
                    # Strings pass through untranslated.
                    strings = bound.arguments[param.name]
                    bound.arguments[param.name] = strings
                else:
                    raise ValueError(
                        "unsupported translation {!r}".format(
                            param.annotation))
            cls._logger.debug(
                "unwrapped %s called with %s", func, bound.arguments)
            retval = func(**bound.arguments)
            cls._logger.debug("unwrapped %s returned %r", func, retval)
            # Translate the return value according to the return annotation.
            if sig.return_annotation is Signature.empty:
                pass
            elif sig.return_annotation == 'o':
                retval = translate_return_o(retval)
            elif sig.return_annotation == 'ao':
                retval = translate_return_ao(retval)
            elif sig.return_annotation == 'a{so}':
                retval = translate_return_a_brace_so_brace(retval)
            else:
                raise ValueError(
                    "unsupported translation {!r}".format(
                        sig.return_annotation))
            cls._logger.debug("wrapped %s returned  %r", func, retval)
            return retval
        return wrapper
+
+
class JobDefinitionWrapper(PlainBoxObjectWrapper):
    """
    Wrapper for exposing JobDefinition objects on DBus
    """

    HIDDEN_INTERFACES = frozenset([
        OBJECT_MANAGER_IFACE,
    ])

    # Some internal helpers

    def __shared_initialize__(self, **kwargs):
        # Computing the checksum serializes the whole job definition, so do
        # it once here and cache it rather than on every property access.
        self._checksum = self.native.get_checksum()

    def _get_preferred_object_path(self):
        # TODO: this clashes with providers, maybe use a random ID instead
        return "/plainbox/job/{}".format(self._checksum)

    # PlainBox properties

    @dbus.service.property(dbus_interface=JOB_IFACE, signature="s")
    def name(self):
        """Name of the job."""
        return self.native.name

    @dbus.service.property(dbus_interface=JOB_IFACE, signature="s")
    def description(self):
        """Description of the job (empty string when unset)."""
        return self.native.description or ""

    @dbus.service.property(dbus_interface=JOB_IFACE, signature="s")
    def checksum(self):
        """Checksum of the job definition."""
        # This is a bit expensive to compute so let's keep it cached
        return self._checksum

    @dbus.service.property(dbus_interface=JOB_IFACE, signature="s")
    def requires(self):
        """Resource requirement expression (empty string when unset)."""
        return self.native.requires or ""

    @dbus.service.property(dbus_interface=JOB_IFACE, signature="s")
    def depends(self):
        """Direct job dependencies (empty string when unset)."""
        return self.native.depends or ""

    @dbus.service.property(dbus_interface=JOB_IFACE, signature="d")
    def estimated_duration(self):
        """
        Estimated duration of this job in seconds, or -1 when unknown.

        Uses an explicit ``is None`` test rather than ``or -1`` so that a
        legitimate estimate of 0.0 seconds is not misreported as unknown.
        """
        duration = self.native.estimated_duration
        return duration if duration is not None else -1

    # PlainBox methods

    @dbus.service.method(dbus_interface=JOB_IFACE,
                         in_signature='', out_signature='as')
    def GetDirectDependencies(self):
        """Return the names of the jobs this job directly depends on."""
        return self.native.get_direct_dependencies()

    @dbus.service.method(dbus_interface=JOB_IFACE,
                         in_signature='', out_signature='as')
    def GetResourceDependencies(self):
        """Return the names of the resource jobs this job depends on."""
        return self.native.get_resource_dependencies()

    @dbus.service.method(dbus_interface=CHECKBOX_JOB_IFACE,
                         in_signature='', out_signature='as')
    def GetEnvironSettings(self):
        """Return the environment settings used by this job."""
        return self.native.get_environ_settings()

    # CheckBox properties

    @dbus.service.property(dbus_interface=CHECKBOX_JOB_IFACE, signature="s")
    def plugin(self):
        """Plugin (job type) of this job."""
        return self.native.plugin

    @dbus.service.property(dbus_interface=CHECKBOX_JOB_IFACE, signature="s")
    def via(self):
        """The 'via' value of this job (empty string when unset)."""
        return self.native.via or ""

    @dbus.service.property(
        dbus_interface=CHECKBOX_JOB_IFACE, signature="(suu)")
    def origin(self):
        """Origin of the definition as (filename, line_start, line_end)."""
        if self.native.origin is not None:
            return dbus.Struct([
                self.native.origin.filename,
                self.native.origin.line_start,
                self.native.origin.line_end
            ], signature="suu")
        else:
            # No origin information; report an empty placeholder struct.
            return dbus.Struct(["", 0, 0], signature="suu")

    @dbus.service.property(dbus_interface=CHECKBOX_JOB_IFACE, signature="s")
    def command(self):
        """Command of this job (empty string when unset)."""
        return self.native.command or ""

    @dbus.service.property(dbus_interface=CHECKBOX_JOB_IFACE, signature="s")
    def environ(self):
        """Environ value of this job (empty string when unset)."""
        return self.native.environ or ""

    @dbus.service.property(dbus_interface=CHECKBOX_JOB_IFACE, signature="s")
    def user(self):
        """User this job runs as (empty string when unset)."""
        return self.native.user or ""
+
+
class WhiteListWrapper(PlainBoxObjectWrapper):
    """
    DBus-facing wrapper around a native WhiteList object.
    """

    HIDDEN_INTERFACES = frozenset([
        OBJECT_MANAGER_IFACE,
    ])

    # Internal helpers

    def _get_preferred_object_path(self):
        # TODO: this clashes with providers, maybe use a random ID instead
        safe_name = self.native.name.replace("-", "_")
        return "/plainbox/whitelist/{}".format(safe_name)

    # Exported properties and methods

    @dbus.service.property(dbus_interface=WHITELIST_IFACE, signature="s")
    def name(self):
        """
        Name of this whitelist (empty string when unset).
        """
        whitelist_name = self.native.name
        return whitelist_name if whitelist_name else ""

    @dbus.service.method(
        dbus_interface=WHITELIST_IFACE, in_signature='', out_signature='as')
    def GetPatternList(self):
        """
        Return the regular expression patterns that make up this whitelist.
        """
        pattern_list = []
        for qualifier in self.native.inclusive_qualifier_list:
            pattern_list.append(qualifier.pattern_text)
        return pattern_list

    @dbus.service.method(
        dbus_interface=WHITELIST_IFACE, in_signature='o', out_signature='b')
    @PlainBoxObjectWrapper.translate
    def Designates(self, job: 'o'):
        """
        Check whether this whitelist designates the given job.
        """
        return self.native.designates(job)
+
+
class JobResultWrapper(PlainBoxObjectWrapper):
    """
    Wrapper for exposing JobResult objects on DBus
    """

    HIDDEN_INTERFACES = frozenset([
        OBJECT_MANAGER_IFACE,
    ])

    def __shared_initialize__(self, **kwargs):
        # Mirror native-side change signals so that DBus PropertiesChanged
        # notifications can be emitted from this wrapper.
        self.native.on_comments_changed.connect(self.on_comments_changed)
        self.native.on_outcome_changed.connect(self.on_outcome_changed)

    def __del__(self):
        # NOTE(review): disconnection happens only at garbage collection
        # time, which is not deterministic — confirm the native signal does
        # not keep this wrapper alive in the meantime.
        self.native.on_comments_changed.disconnect(self.on_comments_changed)
        self.native.on_outcome_changed.disconnect(self.on_outcome_changed)

    # Value added

    @dbus.service.property(dbus_interface=JOB_RESULT_IFACE, signature="s")
    def outcome(self):
        """
        outcome of the job

        The result is one of a set of fixed strings
        """
        # XXX: it would be nice if we could not do this remapping.
        # A missing outcome (None) is reported as the string "none".
        return self.native.outcome or "none"

    @outcome.setter
    def outcome(self, new_value):
        """
        set outcome of the job to a new value
        """
        # XXX: it would be nice if we could not do this remapping.
        # Mirror of the getter: the string "none" maps back to None.
        if new_value == "none":
            new_value = None
        self.native.outcome = new_value

    @Signal.define
    def on_outcome_changed(self, old, new):
        # Relay the native-side change as a DBus PropertiesChanged event.
        logger.debug("on_outcome_changed(%r, %r)", old, new)
        self.PropertiesChanged(JOB_RESULT_IFACE, {
            self.__class__.outcome._dbus_property: new
        }, [])

    @dbus.service.property(dbus_interface=JOB_RESULT_IFACE, signature="d")
    def execution_duration(self):
        """
        The amount of time in seconds it took to run this jobs command.

        :returns:
            The value of execution_duration or -1.0 if the command was not
            executed yet.
        """
        execution_duration = self.native.execution_duration
        if execution_duration is None:
            return -1.0
        else:
            return execution_duration

    @dbus.service.property(dbus_interface=JOB_RESULT_IFACE, signature="v")
    def return_code(self):
        """
        return code of the called program
        """
        # An empty string stands for "not executed yet" since the variant
        # signature cannot carry None.
        value = self.native.return_code
        if value is None:
            return ""
        else:
            return value

    # comments are settable, useful thing that

    @dbus.service.property(dbus_interface=JOB_RESULT_IFACE, signature="s")
    def comments(self):
        """
        Comment added by the operator
        """
        return self.native.comments or ""

    @comments.setter
    def comments(self, value):
        # Write-through to the native result; the native on_comments_changed
        # signal (connected above) then emits PropertiesChanged.
        self.native.comments = value

    @Signal.define
    def on_comments_changed(self, old, new):
        # Relay the native-side change as a DBus PropertiesChanged event.
        logger.debug("on_comments_changed(%r, %r)", old, new)
        self.PropertiesChanged(JOB_RESULT_IFACE, {
            self.__class__.comments._dbus_property: new
        }, [])

    @dbus.service.property(
        dbus_interface=JOB_RESULT_IFACE, signature="a(dsay)")
    def io_log(self):
        """
        The input-output log.

        Contains a record of all of the output (actually,
        it has no input logs) that was sent by the called program.

        The format is: array<struct<double, string, array<bytes>>>
        """
        return dbus.types.Array(self.native.get_io_log(), signature="(dsay)")
+
+
class JobStateWrapper(PlainBoxObjectWrapper):
    """
    Wrapper for exposing JobState objects on DBus
    """

    HIDDEN_INTERFACES = frozenset([
        OBJECT_MANAGER_IFACE,
    ])

    def __shared_initialize__(self, **kwargs):
        # Wrap the current result so it can be published along with
        # this state (see publish_related_objects below).
        self._result_wrapper = JobResultWrapper(self.native.result)

    def publish_related_objects(self, connection):
        super(JobStateWrapper, self).publish_related_objects(connection)
        self._result_wrapper.publish_related_objects(connection)

    # Value added

    @dbus.service.method(
        dbus_interface=JOB_STATE_IFACE, in_signature='', out_signature='b')
    def CanStart(self):
        """
        Quickly check if the associated job can run right now.
        """
        return self.native.can_start()

    @dbus.service.method(
        dbus_interface=JOB_STATE_IFACE, in_signature='', out_signature='s')
    def GetReadinessDescription(self):
        """
        Get a human readable description of the current readiness state
        """
        return self.native.get_readiness_description()

    @dbus.service.property(dbus_interface=JOB_STATE_IFACE, signature='o')
    @PlainBoxObjectWrapper.translate
    def job(self) -> 'o':
        """
        Job associated with this state
        """
        return self.native.job

    @dbus.service.property(dbus_interface=JOB_STATE_IFACE, signature='o')
    @PlainBoxObjectWrapper.translate
    def result(self) -> 'o':
        """
        Result of running the associated job
        """
        return self.native.result

    @Signal.define
    def on_result_changed(self):
        # Wrap and publish the new result, then notify observers that the
        # 'result' property now points at it.
        result_wrapper = JobResultWrapper(self.native.result)
        try:
            result_wrapper.publish_related_objects(self.connection)
        except KeyError:
            # The result was already published earlier; re-announce its path.
            logger.warning("Result already exists for: %r", result_wrapper)
            self.PropertiesChanged(JOB_STATE_IFACE, {
                self.__class__.result._dbus_property:
                result_wrapper._get_preferred_object_path()
            }, [])
        else:
            # NOTE(review): this branch sends the wrapper object itself while
            # the branch above sends its object path string — presumably the
            # dbus layer marshals the wrapper to its path; confirm, or make
            # both branches consistent.
            self.PropertiesChanged(JOB_STATE_IFACE, {
                self.__class__.result._dbus_property: result_wrapper
            }, [])

    @dbus.service.property(dbus_interface=JOB_STATE_IFACE, signature='a(isss)')
    def readiness_inhibitor_list(self):
        """
        The list of readiness inhibitors of the associated job

        The list is represented as an array of structures. Each structure
        has a integer and two strings. The integer encodes the cause
        of inhibition.

        Cause may have one of the following values:

        0 - UNDESIRED:
            This job was not selected to run in this session

        1 - PENDING_DEP:
           This job depends on another job which was not started yet

        2 - FAILED_DEP:
            This job depends on another job which was started and failed

        3 - PENDING_RESOURCE:
            This job has a resource requirement expression that uses a resource
            produced by another job which was not started yet

        4 - FAILED_RESOURCE:
            This job has a resource requirement that evaluated to a false value

        The next two strings are the name of the related job and the name
        of the related expression. Either may be empty.
        """
        return dbus.types.Array([
            (inhibitor.cause,
             inhibitor.cause_name,
             (inhibitor.related_job.name
              if inhibitor.related_job is not None else ""),
             (inhibitor.related_expression.text
              if inhibitor.related_expression is not None else ""))
            for inhibitor in self.native.readiness_inhibitor_list
        ], signature="(isss)")
+
+
class SessionWrapper(PlainBoxObjectWrapper):
    """
    Wrapper for exposing SessionState objects on DBus
    """

    HIDDEN_INTERFACES = frozenset()

    # XXX: those will change to SessionManager later and session state will be
    # a part of that (along with session storage)

    def __shared_initialize__(self, **kwargs):
        # Wrap each JobState so it can be published along with the session.
        self._job_state_map_wrapper = {
            job_name: JobStateWrapper(job_state)
            for job_name, job_state in self.native.job_state_map.items()
        }

    def publish_related_objects(self, connection):
        self.publish_self(connection)
        for job_state in self._job_state_map_wrapper.values():
            job_state.publish_related_objects(connection)

    def publish_managed_objects(self):
        wrapper_list = list(self._iter_wrappers())
        self.add_managed_object_list(wrapper_list)
        for wrapper in wrapper_list:
            wrapper.publish_managed_objects()

    def _iter_wrappers(self):
        return itertools.chain(
            # All of the JobState wrappers
            self._job_state_map_wrapper.values(),
            # And all of the JobResult wrappers
            [self.find_wrapper_by_native(job_state_wrapper.native.result)
             for job_state_wrapper in self._job_state_map_wrapper.values()])

    def check_and_wrap_new_jobs(self):
        """
        Create and publish wrappers for any jobs newly added to the session.
        """
        # Since new jobs may have been added, we need to create and publish
        # new JobDefinitionWrappers for them.
        for job in self.native.job_list:
            key = id(job)
            if key not in self._native_id_to_wrapper_map:
                logger.debug("Creating a new JobDefinitionWrapper for %s",
                             job.name)
                wrapper = JobDefinitionWrapper(job)
                wrapper.publish_related_objects(self.connection)
                # Newly created jobs also need a JobState.
                # Note that publishing the JobState also should automatically
                # publish the MemoryJobResult.
                self._job_state_map_wrapper[job.name] = JobStateWrapper(
                    self.native.job_state_map[job.name])
                self._job_state_map_wrapper[job.name].publish_related_objects(
                    self.connection)

    # Value added

    @dbus.service.method(
        dbus_interface=SESSION_IFACE, in_signature='ao', out_signature='as')
    @PlainBoxObjectWrapper.translate
    def UpdateDesiredJobList(self, desired_job_list: 'ao'):
        """
        Update the list of jobs the caller wants to run.

        Returns the list of problems reported by the session, as strings.
        """
        logger.info("UpdateDesiredJobList(%r)", desired_job_list)
        problem_list = self.native.update_desired_job_list(desired_job_list)
        # Do the necessary housekeeping for any new jobs
        self.check_and_wrap_new_jobs()
        # TODO: map each problem into a structure (check which fields should be
        # presented). Document this in the docstring.
        return [str(problem) for problem in problem_list]

    @dbus.service.method(
        dbus_interface=SESSION_IFACE, in_signature='oo', out_signature='')
    @PlainBoxObjectWrapper.translate
    def UpdateJobResult(self, job: 'o', result: 'o'):
        """
        Attach the given result to the given job.
        """
        self.native.update_job_result(job, result)

    @dbus.service.method(
        dbus_interface=SESSION_IFACE, in_signature='', out_signature='(dd)')
    def GetEstimatedDuration(self):
        """
        Return the (automated, manual) duration estimates in seconds.

        Either value is -1.0 when the corresponding estimate is unknown.
        """
        automated, manual = self.native.get_estimated_duration()
        if automated is None:
            automated = -1.0
        if manual is None:
            manual = -1.0
        return automated, manual

    @dbus.service.method(
        dbus_interface=SESSION_IFACE, in_signature='', out_signature='s')
    def PreviousSessionFile(self):
        """
        Return the previous session file, or '' when there is none.
        """
        previous_session_file = self.native.previous_session_file()
        if previous_session_file:
            return previous_session_file
        else:
            return ''

    @dbus.service.method(
        dbus_interface=SESSION_IFACE, in_signature='', out_signature='')
    def Resume(self):
        """
        Resume the previous session and re-synchronize wrappers over DBus.
        """
        # FIXME TODO XXX KLUDGE ALERT
        # The way we replace restored job definitions with the ones from the
        # freshly-initialized session is extremely kludgy. This implementation
        # needs to be revisited at some point and made cleaner. It was done
        # this way to unblock usage of this API under time pressure.
        #
        # First, we take a snapshot of job definitions from the "pristine"
        # session. These already have JobStateWrappers over DBus pointing to
        # the ones created when the provider was exposed.
        old_jobs = [state.job for state in self.native.job_state_map.values()]
        # Now, native resume. This is the only non-kludgy line in this method.
        self.native.resume()
        # After the native resume completes, we need to "synchronize" the new
        # job_list and job_state_map over DBus. This is very similar to what
        # we do when adding jobs from a local job, with the exception that
        # here, *all* the jobs need a JobStateWrapper because since they
        # weren't known when the session was created, they don't have one yet.
        # Also, we need to take the jobs as contained in the job_state_map,
        # rather than job_list, otherwise they won't point to the correct
        # dbus JobDefinition.
        # Finally, the KLUDGE is that we look at the job definitions for each
        # JobState. These were reconstructed from restored session
        # information, and unfortunately they don't map to the
        # exposed-over-dbus JobDefs. However, we can't just create the
        # JobDefinition because since they're exposed over DBus using their
        # unique checksum, trying to expose an identical JobDefinition will
        # create a clash and a crash.
        # The solution, then, is to replace each JobState's "job" attribute
        # with the equivalent job from the old_jobs map. Those *should* be
        # identical value-wise but point to the correct, already-exposed
        # JobDefinition and their wrapper.
        for job_state in self.native.job_state_map.values():
            job = job_state.job
            # Find old equivalent of this job
            if job in old_jobs:
                index = old_jobs.index(job)
                # Next three statements are for debugging only, they further
                # underline the kludgy nature of this section of code.
                old_id = id(job)
                new_id = id(old_jobs[index])
                logger.debug("Replacing object %s with %s for job %s",
                             old_id, new_id, job.name)
                job_state.job = old_jobs[index]
            else:
                # Here we just create new JobDefinitionWrappers, like we do
                # in check_and_wrap_new_jobs, in case the session contained
                # a job whose definition we don't have (i.e. one created by
                # local jobs). I haven't seen this happen yet.
                if id(job) not in self._native_id_to_wrapper_map:
                    logger.debug("Creating a new JobDefinitionWrapper for %s",
                                 job.name)
                    wrapper = JobDefinitionWrapper(job)
                    wrapper.publish_related_objects(self.connection)

            # By here, either job definitions already exist, or they have
            # been created. Create and publish the corresponding
            # JobStateWrapper. Note that the JobStates already had their
            # 'job' attribute pointed to an existing, mapped JobDefinition
            # object.
            self._job_state_map_wrapper[job.name] = JobStateWrapper(
                self.native.job_state_map[job.name])
            self._job_state_map_wrapper[job.name].publish_related_objects(
                self.connection)

    @dbus.service.method(
        dbus_interface=SESSION_IFACE, in_signature='', out_signature='')
    def Clean(self):
        """
        Clean the session.
        """
        self.native.clean()

    @dbus.service.method(
        dbus_interface=SESSION_IFACE, in_signature='', out_signature='')
    def PersistentSave(self):
        """
        Save the session persistently.
        """
        self.native.persistent_save()

    @dbus.service.property(dbus_interface=SESSION_IFACE, signature='ao')
    @PlainBoxObjectWrapper.translate
    def job_list(self) -> 'ao':
        return self.native.job_list

    # TODO: signal<run_list>

    @dbus.service.property(dbus_interface=SESSION_IFACE, signature='ao')
    @PlainBoxObjectWrapper.translate
    def desired_job_list(self) -> 'ao':
        return self.native.desired_job_list

    # TODO: signal<run_list>

    @dbus.service.property(dbus_interface=SESSION_IFACE, signature='ao')
    @PlainBoxObjectWrapper.translate
    def run_list(self) -> 'ao':
        return self.native.run_list

    # TODO: signal<run_list>

    @dbus.service.property(dbus_interface=SESSION_IFACE, signature='a{so}')
    @PlainBoxObjectWrapper.translate
    def job_state_map(self) -> 'a{so}':
        return self.native.job_state_map

    @Signal.define
    def on_job_state_map_changed(self):
        self.PropertiesChanged(SESSION_IFACE, {
            self.__class__.job_state_map._dbus_property: self.job_state_map
        }, [])

    @dbus.service.property(dbus_interface=SESSION_IFACE, signature='a{sv}')
    def metadata(self):
        """
        Session metadata: title, flags and running job name.
        """
        return dbus.types.Dictionary({
            'title': self.native.metadata.title or "",
            'flags': dbus.types.Array(
                sorted(self.native.metadata.flags), signature='s'),
            'running_job_name': self.native.metadata.running_job_name or ""
        }, signature="sv")

    @metadata.setter
    def metadata(self, value):
        self.native.metadata.title = value['title']
        self.native.metadata.running_job_name = value['running_job_name']
        self.native.metadata.flags = value['flags']

    # TODO: signal<metadata>
+
+
class ProviderWrapper(PlainBoxObjectWrapper):
    """
    DBus-facing wrapper around a Provider1 object.
    """

    HIDDEN_INTERFACES = frozenset()

    def __shared_initialize__(self, **kwargs):
        # Eagerly wrap every built-in job and whitelist of this provider.
        job_wrappers = []
        for job in self.native.get_builtin_jobs():
            job_wrappers.append(JobDefinitionWrapper(job))
        self._job_wrapper_list = job_wrappers
        whitelist_wrappers = []
        for whitelist in self.native.get_builtin_whitelists():
            whitelist_wrappers.append(WhiteListWrapper(whitelist))
        self._whitelist_wrapper_list = whitelist_wrappers

    def _get_preferred_object_path(self):
        return "/plainbox/provider/{}".format(self.native.name)

    def publish_related_objects(self, connection):
        super(ProviderWrapper, self).publish_related_objects(connection)
        for wrapper in self._iter_wrappers():
            wrapper.publish_related_objects(connection)

    def publish_managed_objects(self):
        self.add_managed_object_list(list(self._iter_wrappers()))

    def _iter_wrappers(self):
        # Jobs first, then whitelists, in the order they were wrapped.
        return itertools.chain(
            self._job_wrapper_list,
            self._whitelist_wrapper_list)

    # Exported properties

    @dbus.service.property(dbus_interface=PROVIDER_IFACE, signature="s")
    def name(self):
        """
        Name of this provider.
        """
        return self.native.name

    @dbus.service.property(dbus_interface=PROVIDER_IFACE, signature="s")
    def description(self):
        """
        Description of this provider.
        """
        return self.native.description
+
+
+class ServiceWrapper(PlainBoxObjectWrapper):
+    """
+    Wrapper for exposing Service objects on DBus
+    """
+
+    HIDDEN_INTERFACES = frozenset()
+
+    # Internal setup stuff
+
+    def __shared_initialize__(self, on_exit, **kwargs):
+        # on_exit: callable invoked by Exit() to shut the service down
+        self._on_exit = on_exit
+        self._provider_wrapper_list = [
+            ProviderWrapper(provider)
+            for provider in self.native.provider_list]
+
+    def _get_preferred_object_path(self):
+        return "/plainbox/service1"
+
+    def publish_related_objects(self, connection):
+        super(ServiceWrapper, self).publish_related_objects(connection)
+        for wrapper in self._provider_wrapper_list:
+            wrapper.publish_related_objects(connection)
+
+    def publish_managed_objects(self):
+        # First publish all of our providers
+        self.add_managed_object_list(self._provider_wrapper_list)
+        # Then ask the providers to publish their own objects
+        for wrapper in self._provider_wrapper_list:
+            wrapper.publish_managed_objects()
+
+    # Value added
+
+    @dbus.service.property(dbus_interface=SERVICE_IFACE, signature="s")
+    def version(self):
+        """
+        version of this service
+        """
+        return self.native.version
+
+    @dbus.service.method(
+        dbus_interface=SERVICE_IFACE, in_signature='', out_signature='')
+    def Exit(self):
+        """
+        Shut down the service and terminate
+        """
+        # TODO: raise exception when job is in progress
+        self._on_exit()
+
+    @dbus.service.method(
+        dbus_interface=SERVICE_IFACE, in_signature='', out_signature='a{sas}')
+    def GetAllExporters(self):
+        """
+        Get all exporters names and their respective options
+        """
+        return self.native.get_all_exporters()
+
+    @dbus.service.method(
+        dbus_interface=SERVICE_IFACE, in_signature='osas', out_signature='s')
+    @PlainBoxObjectWrapper.translate
+    def ExportSession(self, session: 'o', output_format: 's',
+                      option_list: 'as'):
+        """
+        Export the given session in output_format and return it as a string.
+        """
+        return self.native.export_session(session, output_format, option_list)
+
+    @dbus.service.method(
+        dbus_interface=SERVICE_IFACE, in_signature='osass', out_signature='s')
+    @PlainBoxObjectWrapper.translate
+    def ExportSessionToFile(self, session: 'o', output_format: 's',
+                      option_list: 'as', output_file: 's'):
+        """
+        Export the given session in output_format and write it to output_file.
+        """
+        return self.native.export_session_to_file(session, output_format, option_list,
+                                          output_file)
+
+    @dbus.service.method(
+        dbus_interface=SERVICE_IFACE, in_signature='ao', out_signature='o')
+    @PlainBoxObjectWrapper.translate
+    def CreateSession(self, job_list: 'ao'):
+        """
+        Create a session for the given jobs, publish its wrapper (and all of
+        the wrapper's children) on the bus and return the wrapper.
+        """
+        # Create a session
+        session_obj = self.native.create_session(job_list)
+        # Wrap it
+        session_wrp = SessionWrapper(session_obj)
+        # Publish all objects
+        session_wrp.publish_related_objects(self.connection)
+        # Announce the session is there
+        self.add_managed_object(session_wrp)
+        # Announce any session children
+        session_wrp.publish_managed_objects()
+        # Return the session wrapper back
+        return session_wrp
+
+    @dbus.service.method(
+        dbus_interface=SERVICE_IFACE, in_signature='oo', out_signature='')
+    @PlainBoxObjectWrapper.translate
+    def RunJob(self, session: 'o', job: 'o'):
+        """
+        Start running the given job within the given session.
+        """
+        # RunningJob is a plain dbus object (not a wrapper) that tracks one
+        # particular run and emits progress/result signals for it.
+        running_job_wrp = RunningJob(job, session, conn=self.connection)
+        self.native.run_job(session, job, running_job_wrp)
+
+
+class UIOutputPrinter(extcmd.DelegateBase):
+    """
+    Delegate for extcmd that redirect all output to the UI.
+    """
+
+    def __init__(self, runner):
+        # Per-stream line counters, keyed by stream name
+        self._lineno = collections.defaultdict(int)
+        # Object carrying the IOLogGenerated signal (a RunningJob)
+        self._runner = runner
+
+    def on_line(self, stream_name, line):
+        # FIXME: this is not a line number,
+        # TODO: tie this into existing code in runner.py (the module)
+        self._lineno[stream_name] += 1
+        self._runner.IOLogGenerated(self._lineno[stream_name],
+                                    stream_name, line)
+
+
+class RunningJob(dbus.service.Object):
+    """
+    DBus representation of a running job.
+    """
+
+    def __init__(self, job, session, conn=None, object_path=None,
+                 bus_name=None):
+        # Register on the bus under a unique path derived from id(self)
+        if object_path is None:
+            object_path = "/plainbox/jobrunner/{}".format(id(self))
+        self.path = object_path
+        dbus.service.Object.__init__(self, conn, self.path, bus_name)
+        self.job = job
+        self.session = session
+        # Partial result data, filled by SetOutcome()/_command_callback()
+        self.result = {}
+        self.ui_io_delegate = UIOutputPrinter(self)
+
+    @dbus.service.method(
+        dbus_interface=RUNNING_JOB_IFACE, in_signature='', out_signature='')
+    def Kill(self):
+        # TODO: not implemented yet
+        pass
+
+    @dbus.service.property(dbus_interface=RUNNING_JOB_IFACE, signature="s")
+    def outcome_from_command(self):
+        # Suggest an outcome from the command's return code:
+        # 0 -> "pass", non-zero -> "fail", not run yet -> "".
+        if self.result.get('return_code') is not None:
+            if self.result.get('return_code') == 0:
+                return "pass"
+            else:
+                return "fail"
+        else:
+            return ""
+
+    @dbus.service.method(
+        dbus_interface=RUNNING_JOB_IFACE, in_signature='ss', out_signature='')
+    def SetOutcome(self, outcome, comments=None):
+        # NOTE(review): in_signature is 'ss' yet comments defaults to None --
+        # confirm whether DBus callers may actually omit it.
+        self.result['outcome'] = outcome
+        self.result['comments'] = comments
+        job_result = DiskJobResult(self.result)
+        self.emitJobResultAvailable(self.job, job_result)
+
+    def _command_callback(self, return_code, record_path):
+        # Called (via parent) from the worker thread when the command ends.
+        self.result['return_code'] = return_code
+        self.result['io_log_filename'] = record_path
+        self.emitAskForOutcomeSignal()
+
+    def _run_command(self, session, job, parent):
+        """
+        Run a Job command in a separate thread
+        """
+        ui_io_delegate = UIOutputPrinter(self)
+        runner = JobRunner(session.session_dir, session.jobs_io_log_dir,
+                           command_io_delegate=ui_io_delegate)
+        # XXX: reaches into JobRunner's private _run_command()
+        return_code, record_path = runner._run_command(job, None)
+        parent._command_callback(return_code, record_path)
+
+    @dbus.service.method(
+        dbus_interface=RUNNING_JOB_IFACE, in_signature='', out_signature='')
+    def RunCommand(self):
+        # FIXME: this thread object leaks, it needs to be .join()ed
+        runner = Thread(target=self._run_command,
+                        args=(self.session, self.job, self))
+        runner.start()
+
+    # NOTE(review): the three signals below are declared on SERVICE_IFACE
+    # although this object otherwise serves RUNNING_JOB_IFACE -- confirm
+    # this is intentional.
+    @dbus.service.signal(
+        dbus_interface=SERVICE_IFACE, signature='dsay')
+    def IOLogGenerated(self, offset, name, data):
+        pass
+
+    # XXX: Try to use PlainBoxObjectWrapper.translate here instead of calling
+    # emitJobResultAvailable to do the translation
+    @dbus.service.signal(
+        dbus_interface=SERVICE_IFACE, signature='oo')
+    def JobResultAvailable(self, job, result):
+        pass
+
+    @dbus.service.signal(
+        dbus_interface=SERVICE_IFACE, signature='o')
+    def AskForOutcome(self, runner):
+        pass
+
+    def emitAskForOutcomeSignal(self, *args):
+        self.AskForOutcome(self.path)
+
+    def emitJobResultAvailable(self, job, result):
+        # Publish the result wrapper first so clients can resolve it.
+        result_wrapper = JobResultWrapper(result)
+        result_wrapper.publish_related_objects(self.connection)
+        # NOTE(review): find_wrapper_by_native appears to return the wrapper
+        # object itself; the *_path names presume it yields object paths --
+        # confirm what the signal actually carries.
+        job_path = PlainBoxObjectWrapper.find_wrapper_by_native(job)
+        result_path = PlainBoxObjectWrapper.find_wrapper_by_native(result)
+        self.JobResultAvailable(job_path, result_path)
+
+    def update_job_result_callback(self, job, result):
+        # Adapter so native code can report results through this object.
+        self.emitJobResultAvailable(job, result)

=== added directory 'plainbox/plainbox/impl/session'
=== added file 'plainbox/plainbox/impl/session/__init__.py'
--- plainbox/plainbox/impl/session/__init__.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/__init__.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,95 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session` -- session handling
+================================================
+
+Sessions are central state holders and one of the most important classes in
+PlainBox. Since they are all named alike it's a bit hard to find what the
+actual responsibilities are. Here's a small shortcut, do read the description
+of each module and class for additional details though.
+
+
+:class:`SessionState`
+
+    This is a class that holds all of the state and program logic. It has
+    the methods required to alter the state by introducing additional jobs
+    or results. Its main responsibility is to keep
+    track of all of the jobs, their results, if they are runnable or not
+    (technically what is preventing them from being runnable) and to compute
+    the order of execution that can satisfy all of the dependencies.
+
+    It holds a number of references to other pieces of PlainBox (jobs,
+    resources and other things) but one thing stands out. This class holds
+    references to a number of :class:`JobState` objects that couple a
+    :class:`JobResult` and :class:`JobDefinition` together.
+
+:class:`SessionStateLegacyAPI`
+
+    This class is a subclass of SessionState with additional methods for
+    suspend and resume. It should not be used in new applications and it will
+    be removed eventually, once the new manager-based API settles in. There are
+    two classes that actually implement this API, one based on the original
+    implementation and another one based on the new implementation. The data
+    they create is not compatible with each other. Currently the original
+    implementation is used. This will change very soon.
+
+:class:`JobState`
+
+    A coupling class between :class:`JobDefinition` and :class:`JobResult`.
+    This class also knows why a job cannot be started in a particular session,
+    by maintaining a set of "inhibitors" that prevent it from being runnable.
+    The actual inhibitors are managed by :class:`SessionState`.
+
+:class:`SessionStorage`
+
+    This class knows how to properly save and load bytes and manages a
+    directory for all the filesystem entries associated with a particular
+    session.  It holds no references to a session though. Typically the class
+    is not instantiated directly but instead comes from helper methods of
+    :class:`SessionStorageRepository`.
+
+:class:`SessionStorageRepository`
+
+    This class knows how to enumerate possible instances of
+    :class:`SessionStorage` from a given location in the filesystem. It also
+    knows how to obtain a default location using XDG standards.
+"""
+
+__all__ = [
+    'JobReadinessInhibitor',
+    'JobState',
+    'SessionManager',
+    'SessionState',
+    'SessionStateLegacyAPI',
+    'SessionStorage',
+    'SessionStorageRepository',
+    'UndesiredJobReadinessInhibitor',
+]
+
+from plainbox.impl.session.jobs import JobReadinessInhibitor
+from plainbox.impl.session.jobs import JobState
+from plainbox.impl.session.jobs import UndesiredJobReadinessInhibitor
+from plainbox.impl.session.legacy import SessionStateLegacyAPI
+from plainbox.impl.session.manager import SessionManager
+from plainbox.impl.session.state import SessionState
+from plainbox.impl.session.storage import SessionStorage
+from plainbox.impl.session.storage import SessionStorageRepository

=== added file 'plainbox/plainbox/impl/session/jobs.py'
--- plainbox/plainbox/impl/session/jobs.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/jobs.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,290 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session.jobs` -- jobs state handling
+========================================================
+
+This module contains a helper class for associating job state within a
+particular session. The :class:`JobState` class holds references to a
+:class:`JobDefinition` and :class:`JobResult` as well as a list of inhibitors
+that prevent the job from being runnable in a particular session.
+"""
+
+import logging
+
+from plainbox.abc import IJobResult
+from plainbox.impl.result import MemoryJobResult
+from plainbox.impl.signal import Signal
+
+logger = logging.getLogger("plainbox.session.jobs")
+
+
+class JobReadinessInhibitor:
+    """
+    Class representing the cause of a job not being ready to execute.
+
+    It is intended to be consumed by UI layers and to provide them with enough
+    information to render informative error messages or other visual feedback
+    that will aid the user in understanding why a job cannot be started.
+
+    There are four possible not ready causes:
+
+        UNDESIRED:
+            This job was not selected to run in this session
+
+        PENDING_DEP:
+           This job depends on another job which was not started yet
+
+        FAILED_DEP:
+            This job depends on another job which was started and failed
+
+        PENDING_RESOURCE:
+            This job has a resource requirement expression that uses a resource
+            produced by another job which was not started yet
+
+        FAILED_RESOURCE:
+            This job has a resource requirement that evaluated to a false value
+
+    All causes apart from UNDESIRED use the related_job property to encode a
+    job that is related to the problem. The PENDING_RESOURCE and
+    FAILED_RESOURCE causes also store related_expression that describes the
+    relevant requirement expression.
+
+    There are three attributes that can be accessed:
+
+        cause:
+            Encodes the reason why a job is not ready, see above.
+
+        related_job:
+            Provides additional context for the problem. This is not the job
+            that is affected, rather, the job that is causing the problem.
+
+        related_expression:
+            Provides additional context for the problem caused by a failing
+            resource expression.
+    """
+    # XXX: PENDING_RESOURCE is not strict, there are multiple states that are
+    # clumped here which is something I don't like. A resource may be still
+    # "pending" as in PENDING_DEP (it has not ran yet) or it could have ran but
+    # failed to produce any data, it could also be prevented from running
+    # because it has unmet dependencies. In essence it tells us nothing about
+    # if related_job.can_start() is true or not.
+    #
+    # XXX: FAILED_RESOURCE is "correct" but somehow misleading, FAILED_RESOURCE
+    # is used to represent a resource expression that evaluated to a non-True
+    # value
+
+    UNDESIRED, PENDING_DEP, FAILED_DEP, PENDING_RESOURCE, FAILED_RESOURCE \
+        = range(5)
+
+    # Human readable names for the cause codes above
+    _cause_display = {
+        UNDESIRED: "UNDESIRED",
+        PENDING_DEP: "PENDING_DEP",
+        FAILED_DEP: "FAILED_DEP",
+        PENDING_RESOURCE: "PENDING_RESOURCE",
+        FAILED_RESOURCE: "FAILED_RESOURCE"
+    }
+
+    def __init__(self, cause, related_job=None, related_expression=None):
+        """
+        Initialize a new inhibitor with the specified cause.
+
+        If cause is other than UNDESIRED a related_job is necessary. If cause
+        is either PENDING_RESOURCE or FAILED_RESOURCE related_expression is
+        necessary as well. A ValueError is raised when this is violated.
+        """
+        if cause not in self._cause_display:
+            raise ValueError("unsupported value for cause")
+        if cause != self.UNDESIRED and related_job is None:
+            raise ValueError("related_job must not be None when cause is"
+                             " {}".format(self._cause_display[cause]))
+        if cause in (self.PENDING_RESOURCE, self.FAILED_RESOURCE) \
+                and related_expression is None:
+            # FIXME: missing space in the split string literal below -- the
+            # message renders as "...when causeis PENDING_RESOURCE".
+            raise ValueError("related_expression must not be None when cause"
+                             "is {}".format(self._cause_display[cause]))
+        self.cause = cause
+        self.related_job = related_job
+        self.related_expression = related_expression
+
+    @property
+    def cause_name(self):
+        # Human readable name of this inhibitor's cause code
+        return self._cause_display[self.cause]
+
+    def __repr__(self):
+        return "<{} cause:{} related_job:{!r} related_expression:{!r}>".format(
+            self.__class__.__name__, self._cause_display[self.cause],
+            self.related_job, self.related_expression)
+
+    def __str__(self):
+        # Human readable, per-cause explanation for UI consumption
+        if self.cause == self.UNDESIRED:
+            return "undesired"
+        elif self.cause == self.PENDING_DEP:
+            return "required dependency {!r} did not run yet".format(
+                self.related_job.name)
+        elif self.cause == self.FAILED_DEP:
+            return "required dependency {!r} has failed".format(
+                self.related_job.name)
+        elif self.cause == self.PENDING_RESOURCE:
+            return ("resource expression {!r} could not be evaluated because"
+                    " the resource it depends on did not run yet").format(
+                        self.related_expression.text)
+        else:
+            assert self.cause == self.FAILED_RESOURCE
+            return "resource expression {!r} evaluates to false".format(
+                self.related_expression.text)
+
+
+# A global instance of :class:`JobReadinessInhibitor` with the UNDESIRED
+# cause.  Shared singleton: it is used a lot, so one instance is created
+# up front instead of instantiating it all the time.
+UndesiredJobReadinessInhibitor = JobReadinessInhibitor(
+    JobReadinessInhibitor.UNDESIRED)
+
+
+class JobState:
+    """
+    Class representing the state of a job in a session.
+
+    Contains two basic properties of each job:
+
+        * the readiness_inhibitor_list that prevents the job from starting
+        * the result (outcome) of the run (IJobResult)
+
+    For convenience (to SessionState implementation) it also has a reference to
+    the job itself.  This class is a pure state holder and will typically
+    collaborate with the SessionState class and the UI layer.
+    """
+
+    def __init__(self, job):
+        """
+        Initialize a new job state object.
+
+        The job will be inhibited by a single UNDESIRED inhibitor and will have
+        a result with OUTCOME_NONE that basically says it did not run yet.
+        """
+        self._job = job
+        self._readiness_inhibitor_list = [UndesiredJobReadinessInhibitor]
+        self._result = MemoryJobResult({
+            'outcome': IJobResult.OUTCOME_NONE
+        })
+
+    def __repr__(self):
+        return ("<{} job:{!r} readiness_inhibitor_list:{!r}"
+                " result:{!r}>").format(
+                    self.__class__.__name__, self._job,
+                    self._readiness_inhibitor_list, self._result)
+
+    @property
+    def job(self):
+        """
+        the job associated with this state
+        """
+        return self._job
+
+    @job.setter
+    def job(self, job):
+        """
+        Changes the job associated with this state
+        """
+        #FIXME: This setter should not exist. job attribute should be
+        #read-only. This is a temporary kludge to get session restoring
+        #over DBus working. Once a solution that doesn't involve setting
+        #a JobState's job attribute is implemented, please remove this
+        #awful method.
+        self._job = job
+
+    # The two properties below use the property(*factory()) idiom so that
+    # the doc string can be defined next to the accessor functions.
+
+    def _readiness_inhibitor_list():
+
+        doc = "the list of readiness inhibitors of the associated job"
+
+        def fget(self):
+            return self._readiness_inhibitor_list
+
+        def fset(self, value):
+            self._readiness_inhibitor_list = value
+
+        return (fget, fset, None, doc)
+
+    readiness_inhibitor_list = property(*_readiness_inhibitor_list())
+
+    def _result():
+        doc = "the result of running the associated job"
+
+        def fget(self):
+            return self._result
+
+        def fset(self, new):
+            # Fire on_result_changed only on an actual change
+            old = self._result
+            if old != new:
+                self._result = new
+                self.on_result_changed(old, new)
+
+        return (fget, fset, None, doc)
+
+    result = property(*_result())
+
+    @Signal.define
+    def on_result_changed(self, old, new):
+        """
+        Event fired when the result associated with this job state changes
+        """
+        logger.info(
+            "Result for %s changed from %r to %r",
+            self.job.name, old, new)
+
+    def can_start(self):
+        """
+        Quickly check if the associated job can run right now.
+        """
+        return len(self._readiness_inhibitor_list) == 0
+
+    def get_readiness_description(self):
+        """
+        Get a human readable description of the current readiness state
+        """
+        if self._readiness_inhibitor_list:
+            return "job cannot be started: {}".format(
+                ", ".join((str(inhibitor)
+                           for inhibitor in self._readiness_inhibitor_list)))
+        else:
+            return "job can be started"
+
+    def _get_persistance_subset(self):
+        # Don't save resource job results, fresh data are required
+        # so we can't reuse the old ones
+        # The inhibitor list needs to be recomputed as well, don't save it.
+        state = {}
+        state['_job'] = self._job
+        if self._job.plugin == 'resource':
+            state['_result'] = MemoryJobResult({
+                'outcome': IJobResult.OUTCOME_NONE
+            })
+        else:
+            state['_result'] = self._result
+        return state
+
+    @classmethod
+    def from_json_record(cls, record):
+        """
+        Create a JobState instance from JSON record
+        """
+        obj = cls(record['_job'])
+        # Inhibitors are never persisted; start over from UNDESIRED
+        obj._readiness_inhibitor_list = [UndesiredJobReadinessInhibitor]
+        obj._result = record['_result']
+        return obj

=== added file 'plainbox/plainbox/impl/session/legacy.py'
--- plainbox/plainbox/impl/session/legacy.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/legacy.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,271 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session.legacy` -- Legacy suspend/resume API
+================================================================
+"""
+
+import abc
+import logging
+import os
+
+from plainbox.impl.session.state import SessionState
+from plainbox.impl.session.manager import SessionManager
+from plainbox.impl.session.storage import SessionStorageRepository
+
+logger = logging.getLogger("plainbox.session.legacy")
+
+
+class ISessionStateLegacyAPI(metaclass=abc.ABCMeta):
+    """
+    Interface describing legacy parts of the SessionState API.
+    """
+
+    # Filename used for the per-session checkpoint data
+    session_data_filename = 'session.json'
+
+    @abc.abstractproperty
+    def session_dir(self):
+        """
+        pathname of a temporary directory for this session
+
+        This is not None only between calls to open() / close().
+        """
+
+    @abc.abstractproperty
+    def jobs_io_log_dir(self):
+        """
+        pathname of the jobs IO logs directory
+
+        This is not None only between calls to open() / close().
+        """
+
+    # NOTE(review): 'open' is declared with abstractproperty although the
+    # concrete implementation defines open() as a regular method -- confirm.
+    @abc.abstractproperty
+    def open(self):
+        """
+        Open session state for running jobs.
+
+        This function creates the cache directory where jobs can store their
+        data. See:
+        http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+        """
+
+    @abc.abstractmethod
+    def clean(self):
+        """
+        Clean the session directory.
+        """
+
+    @abc.abstractmethod
+    def previous_session_file(self):
+        """
+        Check the filesystem for previous session data
+        Returns the full pathname to the session file if it exists
+        """
+
+    @abc.abstractmethod
+    def persistent_save(self):
+        """
+        Save to disk the minimum needed to resume plainbox where it stopped
+        """
+
+    @abc.abstractmethod
+    def resume(self):
+        """
+        Erase the job_state_map and desired_job_list with the saved ones
+        """
+
+    @abc.abstractmethod
+    def __enter__(self):
+        return self
+
+    @abc.abstractmethod
+    def __exit__(self, *args):
+        self.close()
+
+
+class SessionStateLegacyAPICompatImpl(SessionState, ISessionStateLegacyAPI):
+    """
+    Compatibility wrapper to use new suspend/resume implementation via the
+    original (legacy) suspend/resume API.
+
+    This subclass of SessionState implements the ISessionStateLegacyAPI
+    interface thus allowing applications to keep using suspend/resume as they
+    did before, without adjusting their code.
+
+    :ivar _manager:
+        Instance of SessionManager (this is a bit insane because
+        the manager actually knows about the session too)
+
+    :ivar _commit_hint:
+        Either None or a set of flags (strings) that determine what kind of
+        actions should take place before the next time the 'manager' property
+        gets accessed. This is used to implement lazy decision on how to
+        map the open/resume/clean methods onto the SessionManager API
+    """
+
+    def __init__(self, job_list):
+        super(SessionStateLegacyAPICompatImpl, self).__init__(job_list)
+        # Lazily created SessionManager plus pending action hints
+        self._manager = None
+        self._commit_hint = None
+
+    def open(self):
+        """
+        Open session state for running jobs.
+        """
+        logger.debug("SessionState.open()")
+        # Only record the intent; the manager is created on first access.
+        self._add_hint('open')
+        return self
+
+    def resume(self):
+        """
+        Erase the job_state_map and desired_job_list with the saved ones
+        """
+        logger.debug("SessionState.resume()")
+        self._add_hint('resume')
+        self._commit_manager()
+
+    def clean(self):
+        """
+        Clean the session directory.
+        """
+        logger.debug("SessionState.clean()")
+        self._add_hint('clean')
+        self._commit_manager()
+
+    def close(self):
+        """
+        Close the session.
+
+        Legacy API, this function does absolutely nothing
+        """
+        logger.debug("SessionState.close()")
+        self._manager = None
+        self._commit_hint = None
+
+    def _add_hint(self, hint):
+        # Accumulate hints until the next _commit_manager() call
+        if self._commit_hint is None:
+            self._commit_hint = set()
+        self._commit_hint.add(hint)
+
+    @property
+    def manager(self):
+        logger.debug(".manager accessed")
+        # Apply any pending hints before handing out the manager
+        if self._commit_hint is not None:
+            self._commit_manager()
+        if self._manager is None:
+            raise AttributeError("Session not ready, did you call open()?")
+        return self._manager
+
+    def _commit_manager(self):
+        """
+        Commit the new value of the '_manager' instance attribute.
+
+        This method looks at '_commit_hint' to figure out if the semantics
+        of open(), resume() or clean() should be applied on the SessionManager
+        instance that this class is tracking.
+        """
+        logger.debug("_commit_manager(), _commit_hint: %r", self._commit_hint)
+        assert isinstance(self._commit_hint, set)
+        if 'open' in self._commit_hint:
+            if 'resume' in self._commit_hint:
+                self._commit_resume()
+            elif 'clean' in self._commit_hint:
+                self._commit_clean()
+            else:
+                self._commit_open()
+        self._commit_hint = None
+
+    def _commit_open(self):
+        logger.debug("_commit_open()")
+        self._manager = SessionManager.create_session(
+            self.job_list, legacy_mode=True)
+        # Compatibility hack. Since session manager is supposed to
+        # create and manage both session state and session storage
+        # we need to inject ourselves into its internal attribute.
+        # This way it will keep operating on this instance in the
+        # essential checkpoint() method.
+        self._manager._state = self
+
+    def _commit_clean(self):
+        logger.debug("_commit_clean()")
+        if self._manager:
+            self._manager.destroy()
+            # NOTE(review): the return value of this instance-level call is
+            # discarded and a fresh manager is created just below anyway --
+            # this line looks redundant; confirm before removing.
+            self._manager.create_session(self.job_list)
+        self._manager = SessionManager.create_session(
+            self.job_list, legacy_mode=True)
+        self._manager._state = self
+
+    def _commit_resume(self):
+        logger.debug("_commit_resume()")
+        last_storage = SessionStorageRepository().get_last_storage()
+        assert last_storage is not None, "no saved session to resume"
+        # lambda makes load_session reuse this very instance as the state
+        self._manager = SessionManager.load_session(
+            self.job_list, last_storage, lambda session: self)
+        logger.debug("_commit_resume() finished")
+
+    @property
+    def session_dir(self):
+        """
+        pathname of a temporary directory for this session
+
+        This is not None only between calls to open() / close().
+        """
+        if self._commit_hint is not None:
+            self._commit_manager()
+        if self._manager is None:
+            return None
+        else:
+            return self.manager.storage.location
+
+    @property
+    def jobs_io_log_dir(self):
+        """
+        pathname of the jobs IO logs directory
+
+        This is not None only between calls to open() / close().
+        """
+        # TODO: use well-known dir helper
+        return os.path.join(self.manager.storage.location, 'io-logs')
+
+    def previous_session_file(self):
+        """
+        Check the filesystem for previous session data
+        Returns the full pathname to the session file if it exists
+        """
+        last_storage = SessionStorageRepository().get_last_storage()
+        if last_storage:
+            return last_storage.location
+        else:
+            return None
+
+    def persistent_save(self):
+        """
+        Save to disk the minimum needed to resume plainbox where it stopped
+        """
+        self.manager.checkpoint()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+
+SessionStateLegacyAPI = SessionStateLegacyAPICompatImpl

=== added file 'plainbox/plainbox/impl/session/manager.py'
--- plainbox/plainbox/impl/session/manager.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/manager.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,241 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session.manager` -- manager for sessions
+============================================================
+
+This module contains glue code that allows one to create and manage sessions
+and their filesystem presence. It allows
+:class:`~plainbox.impl.session.state.SessionState` to be de-coupled
+from :class:`~plainbox.impl.session.storage.SessionStorageRepository`,
+:class:`~plainbox.impl.session.storage.SessionStorage`,
+:class:`~plainbox.impl.session.suspend.SessionSuspendHelper`
+and :class:`~plainbox.impl.session.suspend.SessionResumeHelper`.
+"""
+
+import os
+import logging
+
+from plainbox.impl.session.resume import SessionResumeHelper
+from plainbox.impl.session.state import SessionState
+from plainbox.impl.session.storage import LockedStorageError
+from plainbox.impl.session.storage import SessionStorage
+from plainbox.impl.session.storage import SessionStorageRepository
+from plainbox.impl.session.suspend import SessionSuspendHelper
+
+logger = logging.getLogger("plainbox.session.manager")
+
+
class WellKnownDirsHelper:
    """
    Helper that encapsulates the magic directory names used inside a
    session storage directory.

    Rather than scattering literal path names through the code base, each
    well-known directory is named here, and :meth:`populate()` creates
    any of them that are missing.
    """

    def __init__(self, storage):
        """
        Initialize the helper with a
        :class:`~plainbox.impl.session.storage.SessionStorage` instance.
        """
        assert isinstance(storage, SessionStorage)
        self._storage = storage

    @property
    def storage(self):
        """
        the :class:`~plainbox.impl.session.storage.SessionStorage` this
        helper operates on
        """
        return self._storage

    def populate(self):
        """
        Ensure that every well-known directory exists, creating each
        missing one (and any intermediate directories) on demand.
        """
        for pathname in self.all_directories:
            if os.path.exists(pathname):
                continue
            os.makedirs(pathname)

    @property
    def all_directories(self):
        """
        a list of all well-known directory pathnames
        """
        return [self.io_log_pathname]

    @property
    def io_log_pathname(self):
        """
        full path of the directory where per-job IO logs are stored
        """
        return os.path.join(self.storage.location, "io-logs")
+
+
class SessionManager:
    """
    Manager class for coupling SessionStorage with SessionState.

    This class allows application code to manage disk state of sessions. Using
    the :meth:`checkpoint()` method applications can create persistent
    snapshots of the :class:`~plainbox.impl.session.state.SessionState`
    associated with each :class:`SessionManager`.
    """

    def __init__(self, state, storage):
        """
        Initialize a manager with a specific
        :class:`~plainbox.impl.session.state.SessionState` and
        :class:`~plainbox.impl.session.storage.SessionStorage`.
        """
        assert isinstance(state, SessionState)
        assert isinstance(storage, SessionStorage)
        self._state = state
        self._storage = storage
        logger.debug(
            "Created SessionManager with state:%r and storage:%r",
            state, storage)

    @property
    def state(self):
        """
        :class:`~plainbox.impl.session.state.SessionState` associated with
        this manager
        """
        return self._state

    @property
    def storage(self):
        """
        :class:`~plainbox.impl.session.storage.SessionStorage` associated with
        this manager
        """
        return self._storage

    @classmethod
    def create_session(cls, job_list=None, repo=None, legacy_mode=False):
        """
        Create a session manager with a fresh session.

        This method populates the session storage with all of the well known
        directories (using :meth:`WellKnownDirsHelper.populate()`)

        :param job_list:
            If specified then this will be the initial list of jobs known
            by the session state object. This can be specified for convenience
            but is really optional since the application can always add more
            jobs to an existing session.
        :type job_list:
            list of :class:`~plainbox.abc.IJobDefinition`.
        :param repo:
            If specified then this particular repository will be used to create
            the storage for this session. If left out, a new repository is
            constructed with the default location.
        :type repo:
            :class:`~plainbox.impl.session.storage.SessionStorageRepository`.
        :param legacy_mode:
            Propagated to
            :meth:`~plainbox.impl.session.storage.SessionStorage.create()`
            to ensure that legacy (single session) mode is used.
        :type legacy_mode:
            bool
        :return:
            fresh :class:`SessionManager` instance
        """
        logger.debug("SessionManager.create_session()")
        if job_list is None:
            job_list = []
        state = SessionState(job_list)
        if repo is None:
            repo = SessionStorageRepository()
        storage = SessionStorage.create(repo.location, legacy_mode)
        WellKnownDirsHelper(storage).populate()
        return cls(state, storage)

    @classmethod
    def load_session(cls, job_list, storage, early_cb=None):
        """
        Open a previously checkpointed session.

        This method allows one to re-open a session that was previously
        created by :meth:`SessionManager.checkpoint()`

        :param job_list:
            List of all known jobs. This argument is used to reconstruct the
            session from a dormant state. Since the suspended data cannot
            capture implementation details of each job reliably actual jobs
            need to be provided externally. Unlike in :meth:`create_session()`
            this list really needs to be complete, it must also include
            any generated jobs.
        :param storage:
            The storage that should be used for this particular session.
            The storage object holds references to existing directories
            in the file system. When restoring an existing dormant session
            it is important to use the correct storage object, the one that
            corresponds to the file system location used by the session
            before it was saved.
        :type storage:
            :class:`~plainbox.impl.session.storage.SessionStorage`
        :param early_cb:
            A callback that allows the caller to "see" the session object
            early, before the bulk of resume operation happens. This method can
            be used to register callbacks on the new session before this method
            call returns. The callback accepts one argument, session, which is
            being resumed. This is being passed directly to
            :meth:`plainbox.impl.session.resume.SessionResumeHelper.resume()`
        :raises:
            Anything that can be raised by
            :meth:`~plainbox.impl.session.storage.SessionStorage.
            load_checkpoint()` and :meth:`~plainbox.impl.session.suspend.
            SessionResumeHelper.resume()`
        :returns:
            Fresh instance of :class:`SessionManager`
        """
        # Fixed: this previously logged the non-existent method name
        # "SessionManager.open_session()".
        logger.debug("SessionManager.load_session()")
        data = storage.load_checkpoint()
        state = SessionResumeHelper(job_list).resume(data, early_cb)
        return cls(state, storage)

    def checkpoint(self):
        """
        Create a checkpoint of the session.

        After calling this method you can later reopen the same session with
        :meth:`SessionManager.load_session()`.
        """
        logger.debug("SessionManager.checkpoint()")
        data = SessionSuspendHelper().suspend(self.state)
        logger.debug(
            "Saving %d bytes of checkpoint data to %r",
            len(data), self.storage.location)
        try:
            self.storage.save_checkpoint(data)
        except LockedStorageError:
            # A stale lock (e.g. left behind by an earlier crash) should not
            # prevent saving: break it and retry once.
            self.storage.break_lock()
            self.storage.save_checkpoint(data)

    def destroy(self):
        """
        Destroy all of the filesystem artifacts of the session.

        This basically calls
        :meth:`~plainbox.impl.session.storage.SessionStorage.remove()`
        """
        logger.debug("SessionManager.destroy()")
        self.storage.remove()

=== added file 'plainbox/plainbox/impl/session/resume.py'
--- plainbox/plainbox/impl/session/resume.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/resume.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,477 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session.resume` -- session resume handling
+==============================================================
+
+This module contains classes that can resume a dormant session from
+a binary representation. See docs for the suspend module for details.
+
+The resume logic provides a compromise between usefulness and correctness
+so two assumptions are made:
+
+* We assume that a job's checksum changes whenever its behavior changes.
+  This way we can detect when job definitions were updated after
+  suspending but before resuming.
+
+* We assume that software and hardware *may* change while the session is
+  suspended but this is not something that framework (PlainBox) is
+  concerned with. Applications should provide job definitions that
+  are capable of detecting this and acting appropriately.
+
+  This is true since the user may install additional packages
+  or upgrade existing packages. The user can also add or remove pluggable
+  hardware. Lastly actual machine suspend (or hibernate) and resume *may*
+  cause alterations to the hardware as it is visible from within
+  the system. In any case the framework does not care about this.
+"""
+
+from collections import deque
+import base64
+import binascii
+import gzip
+import json
+import logging
+
+from plainbox.abc import IJobResult
+from plainbox.impl.result import DiskJobResult
+from plainbox.impl.result import MemoryJobResult
+from plainbox.impl.result import IOLogRecord
+from plainbox.impl.session.state import SessionState
+
+logger = logging.getLogger("plainbox.session.resume")
+
+
class SessionResumeError(Exception):
    """
    Base class for exceptions that can be raised when attempting to
    resume a dormant session.

    Applications can catch this single class to handle every resume
    failure mode defined in this module.
    """
+
+
class CorruptedSessionError(SessionResumeError):
    """
    Exception raised when :class:`SessionResumeHelper` cannot decode
    the session byte stream. This exception will be raised with additional
    context that captures the actual underlying cause. Having this exception
    class makes it easier to handle resume errors.

    Covers decompression, text decoding and JSON parsing failures as well
    as semantically corrupted (but parseable) session data.
    """
+
+
class IncompatibleSessionError(SessionResumeError):
    """
    Exception raised when :class:`SessionResumeHelper` comes across malformed
    or unsupported data that was (presumably) produced by
    :class:`SessionSuspendHelper`

    .. note::
        NOTE(review): an unsupported ``version`` currently surfaces as
        :class:`CorruptedSessionError` (raised by ``_validate()``), not this
        class — confirm which class callers should expect.
    """
+
+
class IncompatibleJobError(SessionResumeError):
    """
    Exception raised when :class:`SessionResumeHelper` detects that the set of
    jobs it knows about is incompatible with what was saved before.

    Raised when the checksum recorded at suspend time differs from the
    checksum of the current job definition.
    """
+
+
class SessionResumeHelper:
    """
    Helper class for implementing session resume feature

    This class works with data constructed by
    :class:`~plainbox.impl.session.suspend.SessionSuspendHelper`.

    Due to the constraints of what can be represented in a suspended session,
    this class cannot work in isolation. It must operate with a list of
    known jobs.

    Since (most of the) jobs are being provided externally (as they represent
    the non-serialized parts of checkbox or other job providers) several
    failure modes are possible. Those are documented in :meth:`resume()`
    """

    def __init__(self, job_list):
        """
        Initialize the helper with a list of known jobs.
        """
        self.job_list = job_list

    def resume(self, data, early_cb=None):
        """
        Resume a dormant session.

        :param data:
            Bytes representing the dormant session
        :param early_cb:
            A callback that allows the caller to "see" the session object
            early, before the bulk of resume operation happens. This method can
            be used to register signal listeners on the new session before this
            method call returns. The callback accepts one argument, session,
            which is being resumed.
        :returns:
            resumed session instance
        :rtype:
            :class:`~plainbox.impl.session.state.SessionState`

        This method validates the representation of a dormant session and
        re-creates a similar-but-not-identical SessionState instance. It can
        fail in multiple ways, some of which are a part of normal operation and
        should always be handled (:class:`IncompatibleSessionError` and
        :class:`IncompatibleJobError`). Applications may wish to capture
        :class:`SessionResumeError` as a generic base exception for all the
        possible problems.

        :raises CorruptedSessionError:
            if the representation of the session is corrupted in any way
        :raises IncompatibleSessionError:
            if session serialization format is not supported
        :raises IncompatibleJobError:
            if serialized jobs are not the same as current jobs
        """
        try:
            data = gzip.decompress(data)
        except IOError:
            raise CorruptedSessionError("Cannot decompress session data")
        try:
            text = data.decode("UTF-8")
        except UnicodeDecodeError:
            raise CorruptedSessionError("Cannot decode session text")
        try:
            json_repr = json.loads(text)
        except ValueError:
            raise CorruptedSessionError("Cannot interpret session JSON")
        return self._resume_json(json_repr, early_cb)

    def _resume_json(self, json_repr, early_cb=None):
        """
        Resume a SessionState object from the JSON representation.

        This method is called by :meth:`resume()` after the initial envelope
        and parsing is done. The only error conditions that can happen
        are related to semantic incompatibilities or corrupted internal state.
        """
        logger.debug("Resuming from json... (see below)")
        logger.debug(json.dumps(json_repr, indent=4))
        _validate(json_repr, value_type=dict)
        _validate(json_repr, key="version", choice=[1])
        session_repr = _validate(json_repr, key='session', value_type=dict)
        return self._build_SessionState(session_repr, early_cb)

    def _build_SessionState(self, session_repr, early_cb=None):
        """
        Reconstruct the session state object.

        This method creates a fresh SessionState instance and restores
        jobs, results, meta-data and desired job list using helper methods.
        """
        # Construct a fresh session object.
        session = SessionState(self.job_list)
        logger.debug("Constructed new session for resume %r", session)
        # Give early_cb a chance to see the session before we start resuming.
        # This way applications can see, among other things, generated jobs
        # as they are added to the session, by registering appropriate signal
        # handlers on the freshly-constructed session instance.
        if early_cb is not None:
            logger.debug("Invoking early callback %r", early_cb)
            new_session = early_cb(session)
            if new_session is not None:
                logger.debug(
                    "Using different session for resume: %r", new_session)
                session = new_session
        # Restore bits and pieces of state
        logger.debug("Starting to restore jobs and results to %r...", session)
        self._restore_SessionState_jobs_and_results(session, session_repr)
        logger.debug("Starting to restore metadata...")
        self._restore_SessionState_metadata(session, session_repr)
        logger.debug("Starting to restore desired job list...")
        self._restore_SessionState_desired_job_list(session, session_repr)
        # Return whatever we've got
        logger.debug("Resume complete!")
        return session

    def _restore_SessionState_jobs_and_results(self, session, session_repr):
        """
        Process representation of a session and restore jobs and results.

        This method reconstructs all jobs and results in several stages.
        The first pass just goes over all the jobs and results and restores
        all of the non-generated jobs using :meth:`_process_job()` method.
        Any jobs that cannot be processed (generated job) is saved for further
        processing.
        """
        # Representation of all of the job definitions
        jobs_repr = _validate(session_repr, key='jobs', value_type=dict)
        # Representation of all of the job results
        results_repr = _validate(session_repr, key='results', value_type=dict)
        # List of jobs (names) that could not be processed on the first pass
        leftover_jobs = deque()
        # Run a first pass through jobs and results. Anything that didn't
        # work (generated jobs) gets added to leftover_jobs list.
        # To make this bit deterministic (we like determinism) we're always
        # going to process job results in alphabetical order.
        first_pass_list = sorted(
            set(jobs_repr.keys()) | set(results_repr.keys()))
        for job_name in first_pass_list:
            try:
                self._process_job(session, jobs_repr, results_repr, job_name)
            except KeyError:
                leftover_jobs.append(job_name)
        # Process leftovers. For each iteration the leftover_jobs list should
        # shrink or we're not making any progress. If that happens we've got
        # undefined jobs (in general the session is corrupted)
        while leftover_jobs:
            # Append a sentinel object so that we can know when we're
            # done "iterating" over the collection once.
            # Also: https://twitter.com/zygoon/status/370213046678872065
            leftover_jobs.append(None)
            leftover_shrunk = False
            while leftover_jobs:  # pragma: no branch
                job_name = leftover_jobs.popleft()
                # Treat the sentinel None object as the end of the iteration
                if job_name is None:
                    break
                try:
                    self._process_job(
                        session, jobs_repr, results_repr, job_name)
                except KeyError:
                    leftover_jobs.append(job_name)
                else:
                    leftover_shrunk = True
            # Check if we're making any progress.
            # We don't want to keep spinning on a list of some bogus jobs
            # that nothing generated so we need an end condition for that case
            if not leftover_shrunk:
                raise CorruptedSessionError(
                    "Unknown jobs remaining: {}".format(
                        ", ".join(leftover_jobs)))

    def _process_job(self, session, jobs_repr, results_repr, job_name):
        """
        Process all representation details associated with a particular job

        This method takes a session object, representation of all the jobs
        and all the results (and a job name) and tries to reconstruct the
        state associated with that job in the session object.

        Jobs are verified to match existing (known) jobs. Results are
        rebuilt from their representation and presented back to the session
        for processing (this restores resources and generated jobs).

        This method can fail in normal operation, when the job that was
        being processed is a generated job and has not been reintroduced into
        the session. When that happens a KeyError is raised.

        .. note::
            Since the representation format for results can support storing
            and restoring a list of results (per job) but the SessionState
            cannot yet do that the implementation of this method restores
            the state of the _last_ result object only.
        """
        _validate(job_name, value_type=str)
        # Get the checksum from the representation
        checksum = _validate(
            jobs_repr, key=job_name, value_type=str)
        # Look up the actual job definition in the session.
        # This can raise KeyError but it is okay, callers expect that
        job = session.job_state_map[job_name].job
        # Check if job definition has not changed
        if job.get_checksum() != checksum:
            raise IncompatibleJobError(
                "Definition of job {!r} has changed".format(job_name))
        # Collect all of the result objects into result_list
        result_list = []
        result_list_repr = _validate(
            results_repr, key=job_name, value_type=list, value_none=True)
        for result_repr in result_list_repr:
            _validate(result_repr, value_type=dict)
            result = self._build_JobResult(result_repr)
            result_list.append(result)
        # Show the _LAST_ result to the session. Currently we only store one
        # result but showing the most recent (last) result should be good
        # in general.
        if len(result_list) > 0:
            logger.debug(
                "calling update_job_result(%r, %r)", job, result_list[-1])
            session.update_job_result(job, result_list[-1])

    @classmethod
    def _restore_SessionState_metadata(cls, session, session_repr):
        """
        Extract meta-data information from the representation of the session
        and set it in the given session object
        """
        # Get the representation of the meta-data
        metadata_repr = _validate(
            session_repr, key='metadata', value_type=dict)
        # Set each bit back to the session
        session.metadata.title = _validate(
            metadata_repr, key='title', value_type=str, value_none=True)
        session.metadata.flags = set([
            _validate(
                flag, value_type=str,
                value_type_msg="Each flag must be a string")
            for flag in _validate(
                metadata_repr, key='flags', value_type=list)])
        session.metadata.running_job_name = _validate(
            metadata_repr, key='running_job_name', value_type=str,
            value_none=True)
        logger.debug("restored metadata %r", session.metadata)

    @classmethod
    def _restore_SessionState_desired_job_list(cls, session, session_repr):
        """
        Extract the representation of desired_job_list from the session and
        set it back to the session object. This method should be called after
        all the jobs are discovered.

        :raises CorruptedSessionError:
            if desired_job_list refers to unknown job
        """
        # List of all the _names_ of the jobs that were selected
        desired_job_list = [
            _validate(
                job_name, value_type=str,
                value_type_msg="Each job name must be a string")
            for job_name in _validate(
                session_repr, key='desired_job_list', value_type=list)]
        # Restore job selection
        logger.debug("calling update_desired_job_list(%r)", desired_job_list)
        try:
            session.update_desired_job_list([
                session.job_state_map[job_name].job
                for job_name in desired_job_list])
        except KeyError as exc:
            raise CorruptedSessionError(
                "'desired_job_list' refers to unknown job {!r}".format(
                    exc.args[0]))

    @classmethod
    def _build_JobResult(cls, result_repr):
        """
        Convert the representation of MemoryJobResult or DiskJobResult
        back into an actual instance.
        """
        # Load all common attributes...
        outcome = _validate(
            result_repr, key='outcome', value_type=str,
            value_choice=IJobResult.ALL_OUTCOME_LIST, value_none=True)
        comments = _validate(
            result_repr, key='comments', value_type=str, value_none=True)
        return_code = _validate(
            result_repr, key='return_code', value_type=int, value_none=True)
        execution_duration = _validate(
            result_repr, key='execution_duration', value_type=float,
            value_none=True)
        # Construct either DiskJobResult or MemoryJobResult
        if 'io_log_filename' in result_repr:
            io_log_filename = _validate(
                result_repr, key='io_log_filename', value_type=str)
            return DiskJobResult({
                'outcome': outcome,
                'comments': comments,
                'execution_duration': execution_duration,
                'io_log_filename': io_log_filename,
                'return_code': return_code
            })
        else:
            io_log = [
                cls._build_IOLogRecord(record_repr)
                for record_repr in _validate(
                    result_repr, key='io_log', value_type=list)]
            return MemoryJobResult({
                'outcome': outcome,
                'comments': comments,
                'execution_duration': execution_duration,
                'io_log': io_log,
                'return_code': return_code
            })

    @classmethod
    def _build_IOLogRecord(cls, record_repr):
        """
        Convert the representation of IOLogRecord back to the object
        """
        _validate(record_repr, value_type=list)
        delay = _validate(record_repr, key=0, value_type=float)
        if delay < 0:
            raise CorruptedSessionError("delay cannot be negative")
        stream_name = _validate(
            record_repr, key=1, value_type=str,
            value_choice=['stdout', 'stderr'])
        data = _validate(record_repr, key=2, value_type=str)
        # Each data item is a base64 string created by encoding the bytes and
        # converting them to ASCII. To get the original we need to undo that
        # operation.
        try:
            data = data.encode("ASCII")
        except UnicodeEncodeError:
            # Fixed: the message was previously passed along with an extra,
            # unformatted positional argument instead of being formatted.
            raise CorruptedSessionError(
                "record data {!r} is not ASCII".format(data))
        try:
            data = base64.standard_b64decode(data)
        except binascii.Error:
            # Fixed: the {!r} placeholder was previously never filled in.
            raise CorruptedSessionError(
                "record data {!r} is not correct base64".format(data))
        return IOLogRecord(delay, stream_name, data)
+
+
+def _validate(obj, **flags):
+    """
+    Multi-purpose extraction and validation function.
+    """
+    # Fetch data from the container OR use json_repr directly
+    if 'key' in flags:
+        key = flags['key']
+        obj_name = "key {!r}".format(key)
+        try:
+            value = obj[key]
+        except (TypeError, IndexError, KeyError):
+            error_msg = flags.get(
+                "missing_key_msg",
+                "Missing value for key {!r}".format(key))
+            raise CorruptedSessionError(error_msg)
+    else:
+        value = obj
+        obj_name = "object"
+    # Check if value can be None (defaulting to "no")
+    value_none = flags.get('value_none', False)
+    if value is None and value_none is False:
+        error_msg = flags.get(
+            "value_none_msg",
+            "Value of {} cannot be None".format(obj_name))
+        raise CorruptedSessionError(error_msg)
+    # Check if value is of correct type
+    if value is not None and "value_type" in flags:
+        value_type = flags['value_type']
+        if not isinstance(value, value_type):
+            error_msg = flags.get(
+                "value_type_msg",
+                "Value of {} is of incorrect type {}".format(
+                    obj_name, type(value).__name__))
+            raise CorruptedSessionError(error_msg)
+    # Check if value is in the set of correct values
+    if "value_choice" in flags:
+        value_choice = flags['value_choice']
+        if value not in value_choice:
+            error_msg = flags.get(
+                "value_choice_msg",
+                "Value for {} not in allowed set {!r}".format(
+                    obj_name, value_choice))
+            raise CorruptedSessionError(error_msg)
+    return value

=== added file 'plainbox/plainbox/impl/session/state.py'
--- plainbox/plainbox/impl/session/state.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/state.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,651 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session.state` -- session state handling
+============================================================
+"""
+import logging
+
+from plainbox.abc import IJobResult
+from plainbox.impl.depmgr import DependencyDuplicateError
+from plainbox.impl.depmgr import DependencyError
+from plainbox.impl.depmgr import DependencySolver
+from plainbox.impl.resource import ExpressionCannotEvaluateError
+from plainbox.impl.resource import ExpressionFailedError
+from plainbox.impl.resource import Resource
+from plainbox.impl.rfc822 import gen_rfc822_records
+from plainbox.impl.rfc822 import RFC822SyntaxError
+from plainbox.impl.session.jobs import JobReadinessInhibitor
+from plainbox.impl.session.jobs import JobState
+from plainbox.impl.session.jobs import UndesiredJobReadinessInhibitor
+from plainbox.impl.signal import Signal
+
+
+logger = logging.getLogger("plainbox.session.state")
+
+
+class SessionMetaData:
+    """
+    Class representing non-critical state of the session.
+
+    The data held here allows applications to reason about sessions in general
+    but is not relevant to the runner or the core in general
+    """
+
+    # Flag indicating that the testing session is not complete and additional
+    # testing is expected. Applications are encouraged to add this flag
+    # immediately after creating a new session. Applications are also
+    # encouraged to remove this flag after the expected test plan is complete
+    FLAG_INCOMPLETE = "incomplete"
+
+    # Flag indicating that results of this testing session have been submitted
+    # to some central results repository. Applications are encouraged to
+    # set this flag after successfully sending the result somewhere.
+    FLAG_SUBMITTED = "submitted"
+
+    def __init__(self, title=None, flags=None, running_job_name=None):
+        # Default to None rather than a (shared) mutable default argument;
+        # a fresh set is built from the passed iterable on each call.
+        if flags is None:
+            flags = []
+        self._title = title
+        self._flags = set(flags)
+        self._running_job_name = running_job_name
+
+    def __repr__(self):
+        return "<{} title:{!r} flags:{!r} running_job_name:{!r}>".format(
+            self.__class__.__name__, self.title, self.flags,
+            self.running_job_name)
+
+    @property
+    def title(self):
+        """
+        the session title.
+
+        Title is just an arbitrary string that can be used to distinguish
+        between multiple sessions.
+
+        The value can be changed at any time.
+        """
+        return self._title
+
+    @title.setter
+    def title(self, title):
+        self._title = title
+
+    @property
+    def flags(self):
+        """
+        a set of flags that are associated with this session.
+
+        This set is persisted by persistent_save() and can be used to keep
+        track of how the application wants to interpret this session state.
+
+        Intended usage is to keep track of "testing finished" and
+        "results submitted" flags. Some flags are added as constants to this
+        class (see FLAG_INCOMPLETE and FLAG_SUBMITTED above).
+        """
+        return self._flags
+
+    @flags.setter
+    def flags(self, flags):
+        self._flags = flags
+
+    @property
+    def running_job_name(self):
+        """
+        name of the running job
+
+        This property should be updated to keep track of the name of the
+        job that is being executed. When either plainbox or the machine it
+        was running on crashes during the execution of a job this value
+        should be preserved and can help the GUI to resume and provide an
+        error message.
+
+        The property MUST be set before starting the job itself.
+        """
+        return self._running_job_name
+
+    @running_job_name.setter
+    def running_job_name(self, running_job_name):
+        self._running_job_name = running_job_name
+
+
+class SessionState:
+    """
+    Class representing all state needed during a single program session.
+
+    This is the central glue/entry-point for applications. It connects user
+    intents to the rest of the system / plumbing and keeps all of the state in
+    one place.
+
+    The set of utility methods and properties allow applications to easily
+    handle the lower levels of dependencies, resources and ready states.
+
+    :class:`SessionState` has the following instance variables, all of which
+    are currently exposed as properties.
+
+    :ivar list job_list: A list of all known jobs
+
+        Not all the jobs from this list are going to be executed (or selected
+        for execution) by the user.
+
+        It may change at runtime because of local jobs. Note that in upcoming
+        changes this will start out empty and will be changeable dynamically.
+        It can still change due to local jobs but there is no API yet.
+
+        This list cannot have any duplicates, if that is the case a
+        :class:`DependencyDuplicateError` is raised. This has to be handled
+        externally and is a sign that the job database is corrupted or has
+        wrong data. As an exception if duplicates are perfectly identical this
+        error is silently corrected.
+
+    :ivar dict job_state_map: mapping that tracks the state of each job
+
+        Mapping from job name to :class:`JobState`. This basically has the test
+        result and the inhibitor of each job. It also serves as a
+        :attr:`plainbox.impl.job.JobDefinition.name`-> job lookup helper.
+
+        Directly exposed with the intent to fuel part of the UI. This is a way
+        to get at the readiness state, result and readiness inhibitors, if any.
+
+        XXX: this can lose data if job_list has jobs with the same name. It
+        would be better to use job id as the keys here. A separate map could be
+        used for the name->job lookup. This will be fixed when session
+        controller branch lands in trunk as then jobs are dynamically added to
+        the system one at a time and proper error conditions can be detected
+        and reported.
+
+    :ivar list desired_job_list: subset of jobs selected for execution
+
+        This is used to compute :attr:`run_list`. It can only be changed by
+        calling :meth:`update_desired_job_list()` which returns meaningful
+        values so this is not a settable property.
+
+    :ivar list run_list: sorted list of jobs to execute
+
+        This is basically a superset of desired_job_list and a subset of
+        job_list that is topologically sorted to allow all desired jobs to
+        run. This property is updated whenever desired_job_list is changed.
+
+    :ivar dict resource_map: all known resources
+
+        A mapping from resource name to a list of
+        :class:`plainbox.impl.resource.Resource` objects. This encapsulates all
+        "knowledge" about the system plainbox is running on.
+
+
+        It is needed to compute job readiness (as it stores resource data
+        needed by resource programs). It is also available to exporters.
+
+        This is computed internally from the output of checkbox resource jobs,
+        it can only be changed by calling :meth:`update_job_result()`
+
+    :ivar dict metadata: instance of :class:`SessionMetaData`
+    """
+
+    @Signal.define
+    def on_job_state_map_changed(self):
+        """
+        Signal fired after job_state_map is changed in any way.
+
+        This signal is always fired before any more specialized signals
+        such as :meth:`on_job_result_changed()` and :meth:`on_job_added()`.
+
+        This signal is fired pretty often, each time a job result is
+        presented to the session and each time a job is added. When
+        both of those events happen at the same time only one notification
+        is sent. The actual state is not sent as it is quite extensive
+        and can be easily looked at by the application.
+        """
+
+    @Signal.define
+    def on_job_result_changed(self, job, result):
+        """
+        Signal fired after a job result gets changed (set)
+
+        This signal is fired each time a result is presented to the session.
+
+        This signal is fired **after** :meth:`on_job_state_map_changed()`
+        """
+        logger.info("Job %s result changed to %r", job, result)
+
+    @Signal.define
+    def on_job_added(self, job):
+        """
+        Signal sent whenever a job is added to the session.
+
+        This signal is fired **after** :meth:`on_job_state_map_changed()`
+        """
+        logger.info("New job defined: %r", job)
+
+    def __init__(self, job_list):
+        """
+        Initialize a new SessionState with a given list of jobs.
+
+        The jobs are all of the jobs that the session knows about.
+        """
+        # Start by making a copy of job_list as we may modify it below
+        job_list = job_list[:]
+        while True:
+            try:
+                # Construct a solver with the job list as passed by the caller.
+                # This will do a little bit of validation and might raise
+                # DependencyDuplicateError if there are any duplicates at this
+                # stage.
+                #
+                # There's a single case that is handled here though, if both
+                # jobs are identical this problem is silently fixed. This
+                # should not happen in normal circumstances but is nonetheless
+                # harmless (as long as both jobs are perfectly identical)
+                #
+                # Since this problem can happen any number of times (many
+                # duplicates) this is performed in a loop. The loop breaks when
+                # we cannot solve the problem _OR_ when no error occurs.
+                DependencySolver(job_list)
+            except DependencyDuplicateError as exc:
+                # If both jobs are identical then silently fix the problem by
+                # removing one of the jobs (here the second one we've seen but
+                # it's not relevant as they are possibly identical) and try
+                # again
+                if exc.job == exc.duplicate_job:
+                    job_list.remove(exc.duplicate_job)
+                    continue
+                else:
+                    # If the jobs differ report this back to the caller
+                    raise
+            else:
+                # If there are no problems then break the loop
+                break
+        self._job_list = job_list
+        self._job_state_map = {job.name: JobState(job)
+                               for job in self._job_list}
+        self._desired_job_list = []
+        self._run_list = []
+        self._resource_map = {}
+        self._metadata = SessionMetaData()
+        super(SessionState, self).__init__()
+
+    def update_desired_job_list(self, desired_job_list):
+        """
+        Update the set of desired jobs (that ought to run)
+
+        This method can be used by the UI to recompute the dependency graph.
+        The argument 'desired_job_list' is a list of jobs that should run.
+        Those jobs must be a sub-collection of the job_list argument that was
+        passed to the constructor.
+
+        It never fails although it may reduce the actual permitted
+        desired_job_list to an empty list. It returns a list of problems (all
+        instances of DependencyError class), one for each job that had to be
+        removed.
+        """
+        # Remember a copy of original desired job list. We may modify this list
+        # so let's not mess up data passed by the caller.
+        self._desired_job_list = list(desired_job_list)
+        # Reset run list just in case desired_job_list is empty
+        self._run_list = []
+        # Try to solve the dependency graph. This is done in a loop as we may
+        # need to remove a problematic job and re-try. The loop provides a stop
+        # condition as we will eventually run out of jobs.
+        problems = []
+        while self._desired_job_list:
+            # XXX: it might be more efficient to incorporate this 'recovery
+            # mode' right into the solver, this way we'd probably save some
+            # resources or runtime complexity.
+            try:
+                self._run_list = DependencySolver.resolve_dependencies(
+                    self._job_list, self._desired_job_list)
+            except DependencyError as exc:
+                # When a dependency error is detected remove the affected job
+                # from _desired_job_list and try again.
+                self._desired_job_list.remove(exc.affected_job)
+                # Remember each problem, this can be presented by the UI
+                problems.append(exc)
+                continue
+            else:
+                # Don't iterate the loop if there was no exception
+                break
+        # Update all job readiness state
+        self._recompute_job_readiness()
+        # Return all dependency problems to the caller
+        return problems
+
+    def get_estimated_duration(self, manual_overhead=30.0):
+        """
+        Provide the estimated duration of the jobs that have been selected
+        to run in this session (maintained by calling update_desired_job_list).
+
+        Manual jobs have an arbitrary figure added to their runtime to allow
+        for execution of the test steps and verification of the result.
+
+        :returns: (estimate_automated, estimate_manual)
+
+        where estimate_automated is the value for automated jobs only and
+        estimate_manual is the value for manual jobs only. These can be
+        easily combined. Either value can be None if the value could not be
+        calculated due to any job lacking the required estimated_duration
+        field.
+        """
+        estimate_automated = 0.0
+        estimate_manual = 0.0
+        for job in self._run_list:
+            if job.automated and estimate_automated is not None:
+                if job.estimated_duration is not None:
+                    estimate_automated += job.estimated_duration
+                elif job.plugin != 'local':
+                    estimate_automated = None
+            elif not job.automated and estimate_manual is not None:
+                # We add a fixed extra amount of seconds to the run time
+                # for manual jobs to account for the time taken in reading
+                # the description and performing any necessary steps
+                estimate_manual += manual_overhead
+                if job.estimated_duration is not None:
+                    estimate_manual += job.estimated_duration
+                elif job.command:
+                    estimate_manual = None
+        return (estimate_automated, estimate_manual)
+
+    def update_job_result(self, job, result):
+        """
+        Notice the specified test result and update readiness state.
+
+        This function updates the internal result collection with the data from
+        the specified test result. Results can safely override older results.
+        Results also change the ready map (jobs that can run) because of
+        dependency relations.
+
+        Some results have deeper meaning, those are results for local and
+        resource jobs. They are discussed in detail below:
+
+        Resource jobs produce resource records which are used as data to run
+        requirement expressions against. Each time a result for a resource job
+        is presented to the session it will be parsed as a collection of RFC822
+        records. A new entry is created in the resource map (entirely replacing
+        any old entries), with a list of the resources that were parsed from
+        the IO log.
+
+        Local jobs produce more jobs. Like with resource jobs, their IO log is
+        parsed and interpreted as additional jobs. Unlike in resource jobs
+        local jobs don't replace anything. They cannot replace an existing job
+        with the same name.
+        """
+        assert job in self._job_list
+        # Store the result in job_state_map
+        self._job_state_map[job.name].result = result
+        self.on_job_state_map_changed()
+        self.on_job_result_changed(job, result)
+        # Treat some jobs specially and interpret their output
+        if job.plugin == "resource":
+            self._process_resource_result(job, result)
+        elif job.plugin == "local":
+            self._process_local_result(job, result)
+        # Update all job readiness state
+        self._recompute_job_readiness()
+
+    def add_job(self, new_job):
+        """
+        Add a new job to the session
+
+        :param new_job: the job being added
+
+        :raises DependencyDuplicateError:
+            if a duplicate, clashing job definition is detected
+
+        The new_job gets added to all the state tracking objects of the
+        session.  The job is initially not selected to run (it is not in the
+        desired_job_list and has the undesired inhibitor).
+
+        The new_job may clash with an existing job with the same name. Unless
+        both jobs are identical this will cause DependencyDuplicateError to be
+        raised. Identical jobs are silently discarded.
+
+        .. note::
+
+            This method recomputes job readiness for all jobs
+        """
+        # See if we have a job with the same name already
+        try:
+            existing_job = self._job_state_map[new_job.name].job
+        except KeyError:
+            # Register the new job in our state
+            self._job_state_map[new_job.name] = JobState(new_job)
+            self._job_list.append(new_job)
+            self.on_job_state_map_changed()
+            self.on_job_added(new_job)
+        else:
+            # If there is a clash report DependencyDuplicateError only when the
+            # hashes are different. This prevents a common "problem" where
+            # "__foo__" local jobs just load all jobs from the "foo" category.
+            if new_job != existing_job:
+                raise DependencyDuplicateError(existing_job, new_job)
+        # Update all job readiness state
+        self._recompute_job_readiness()
+
+    def set_resource_list(self, resource_name, resource_list):
+        """
+        Add or change a resource with the given name.
+
+        Resources silently overwrite any old resources with the same name.
+        """
+        self._resource_map[resource_name] = resource_list
+
+    def _process_resource_result(self, job, result):
+        """
+        Analyze a result of a CheckBox "resource" job and generate
+        or replace resource records.
+        """
+        new_resource_list = []
+        for record in self._gen_rfc822_records_from_io_log(job, result):
+            # XXX: Consider forwarding the origin object here.  I guess we
+            # should have from_rfc822_record as with JobDefinition
+            resource = Resource(record.data)
+            logger.info("Storing resource record %r: %s", job.name, resource)
+            new_resource_list.append(resource)
+        # Replace any old resources with the new resource list
+        self._resource_map[job.name] = new_resource_list
+
+    def _process_local_result(self, job, result):
+        """
+        Analyze a result of a CheckBox "local" job and generate
+        additional job definitions
+        """
+        # TODO: refactor using add_job() but make sure we compute
+        # job state map at most once
+
+        # First parse all records and create a list of new jobs (confusing
+        # name, not a new list of jobs)
+        new_job_list = []
+        for record in self._gen_rfc822_records_from_io_log(job, result):
+            new_job = job.create_child_job_from_record(record)
+            new_job_list.append(new_job)
+        # Then for each new job, add it to the job_list, unless it collides
+        # with another job with the same name.
+        for new_job in new_job_list:
+            try:
+                existing_job = self._job_state_map[new_job.name].job
+            except KeyError:
+                self._job_state_map[new_job.name] = JobState(new_job)
+                self._job_list.append(new_job)
+                self.on_job_state_map_changed()
+                self.on_job_added(new_job)
+            else:
+                # XXX: there should be a channel where such errors could be
+                # reported back to the UI layer. Perhaps update_job_result()
+                # could simply return a list of problems in a similar manner
+                # how update_desired_job_list() does.
+                if new_job != existing_job:
+                    # NOTE(review): this calls the root ``logging`` module
+                    # rather than the module-level ``logger`` used everywhere
+                    # else in this file -- confirm whether that is intentional.
+                    logging.warning(
+                        ("Local job %s produced job %r that collides with"
+                         " an existing job %r, the new job was discarded"),
+                        job, new_job, existing_job)
+                else:
+                    if not existing_job.via:
+                        existing_job._via = new_job.via
+
+    def _gen_rfc822_records_from_io_log(self, job, result):
+        """
+        Convert io_log from a job result to a sequence of rfc822 records
+        """
+        logger.debug("processing output from a job: %r", job)
+        # Select all stdout lines from the io log
+        line_gen = (record[2].decode('UTF-8', errors='replace')
+                    for record in result.get_io_log()
+                    if record[1] == 'stdout')
+        try:
+            # Parse rfc822 records from the subsequent lines
+            for record in gen_rfc822_records(line_gen):
+                yield record
+        except RFC822SyntaxError as exc:
+            # When this exception happens we will _still_ store all the
+            # preceding records. This is worth testing
+            logger.warning(
+                "local script %s returned invalid RFC822 data: %s",
+                job, exc)
+
+    @property
+    def job_list(self):
+        """
+        List of all known jobs.
+
+        Not necessarily all jobs from this list can be, or are desired to run.
+        For API simplicity this variable is read-only, if you wish to alter the
+        list of all jobs re-instantiate this class please.
+        """
+        return self._job_list
+
+    @property
+    def desired_job_list(self):
+        """
+        List of jobs that are on the "desired to run" list
+
+        This is a list, not a set, because the dependency solver algorithm
+        retains as much of the original ordering as possible. Having said that,
+        the actual order can differ widely (for instance, be reversed)
+        """
+        return self._desired_job_list
+
+    @property
+    def run_list(self):
+        """
+        List of jobs that were intended to run, in the proper order
+
+        The order is a result of topological sorting of the desired_job_list.
+        This value is recomputed when update_desired_job_list() is called. It
+        may be shorter than desired_job_list due to dependency errors.
+        """
+        return self._run_list
+
+    @property
+    def job_state_map(self):
+        """
+        Map from job name to JobState that encodes the state of each job.
+        """
+        return self._job_state_map
+
+    @property
+    def resource_map(self):
+        """
+        Map from resource name to a list of resource records
+        """
+        return self._resource_map
+
+    @property
+    def metadata(self):
+        """
+        metadata object associated with this session state.
+        """
+        return self._metadata
+
+    def _recompute_job_readiness(self):
+        """
+        Internal method of SessionState.
+
+        Re-computes [job_state.ready
+                     for job_state in _job_state_map.values()]
+        """
+        # Reset the state of all jobs to have the undesired inhibitor. Since
+        # we maintain a state object for _all_ jobs (including ones not in the
+        # _run_list this correctly updates all values in the _job_state_map
+        # (the UI can safely use the readiness state of all jobs)
+        for job_state in self._job_state_map.values():
+            job_state.readiness_inhibitor_list = [
+                UndesiredJobReadinessInhibitor]
+        # Take advantage of the fact that run_list is topologically sorted and
+        # do a single O(N) pass over _run_list. All "current/update" state is
+        # computed before it needs to be observed (thanks to the ordering)
+        for job in self._run_list:
+            job_state = self._job_state_map[job.name]
+            # Remove the undesired inhibitor as we want to run this job
+            job_state.readiness_inhibitor_list.remove(
+                UndesiredJobReadinessInhibitor)
+            # Check if all job resource requirements are met
+            prog = job.get_resource_program()
+            if prog is not None:
+                try:
+                    prog.evaluate_or_raise(self._resource_map)
+                except ExpressionCannotEvaluateError as exc:
+                    # Lookup the related job (the job that provides the
+                    # resources needed by the expression that cannot be
+                    # evaluated)
+                    related_job = self._job_state_map[
+                        exc.expression.resource_name].job
+                    # Add A PENDING_RESOURCE inhibitor as we are unable to
+                    # determine if the resource requirement is met or not. This
+                    # can happen if the resource job did not run for any reason
+                    # (it can either be prevented from running by normal means
+                    # or simply be on the run_list but just was not executed
+                    # yet).
+                    inhibitor = JobReadinessInhibitor(
+                        cause=JobReadinessInhibitor.PENDING_RESOURCE,
+                        related_job=related_job,
+                        related_expression=exc.expression)
+                    job_state.readiness_inhibitor_list.append(inhibitor)
+                except ExpressionFailedError as exc:
+                    # Lookup the related job (the job that provides the
+                    # resources needed by the expression that failed)
+                    related_job = self._job_state_map[
+                        exc.expression.resource_name].job
+                    # Add a FAILED_RESOURCE inhibitor as we have all the data
+                    # to run the requirement program but it simply returns a
+                    # non-True value. This typically indicates a missing
+                    # software package or necessary hardware.
+                    inhibitor = JobReadinessInhibitor(
+                        cause=JobReadinessInhibitor.FAILED_RESOURCE,
+                        related_job=related_job,
+                        related_expression=exc.expression)
+                    job_state.readiness_inhibitor_list.append(inhibitor)
+            # Check if all job dependencies ran successfully
+            for dep_name in sorted(job.get_direct_dependencies()):
+                dep_job_state = self._job_state_map[dep_name]
+                # If the dependency did not have a chance to run yet add the
+                # PENDING_DEP inhibitor.
+                if dep_job_state.result.outcome == IJobResult.OUTCOME_NONE:
+                    inhibitor = JobReadinessInhibitor(
+                        cause=JobReadinessInhibitor.PENDING_DEP,
+                        related_job=dep_job_state.job)
+                    job_state.readiness_inhibitor_list.append(inhibitor)
+                # If the dependency is anything but successful add the
+                # FAILED_DEP inhibitor. In theory the PENDING_DEP code above
+                # could be discarded but this would lose context and would
+                # prevent the operator from actually understanding why a job
+                # cannot run.
+                elif dep_job_state.result.outcome != IJobResult.OUTCOME_PASS:
+                    inhibitor = JobReadinessInhibitor(
+                        cause=JobReadinessInhibitor.FAILED_DEP,
+                        related_job=dep_job_state.job)
+                    job_state.readiness_inhibitor_list.append(inhibitor)

=== added file 'plainbox/plainbox/impl/session/storage.py'
--- plainbox/plainbox/impl/session/storage.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/storage.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,608 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session.storage` -- storage for sessions
+============================================================
+
+This module contains storage support code for handling sessions. Using the
+:class:`SessionStorageRepository` one can enumerate sessions at a particular
+location. Each location is wrapped by a :class:`SessionStorage` instance. That
+latter class can be used to create (allocate) and remove all of the files
+associated with a particular session.
+"""
+
+import errno
+import logging
+import os
+import shutil
+import stat
+import sys
+import tempfile
+
+logger = logging.getLogger("plainbox.session.storage")
+
+
+class SessionStorageRepository:
+    """
+    Helper class to enumerate filesystem artefacts of current or past Sessions
+
+    This class collaborates with :class:`SessionStorage`. The basic
+    use-case is to open a well-known location and enumerate all the sessions
+    that are stored there. This allows to create :class:`SessionStorage`
+    instances to further manage each session (such as remove them by calling
+    :meth:`SessionStorage.remove()`)
+    """
+
+    _LAST_SESSION_SYMLINK = "last-session"
+
+    def __init__(self, location=None):
+        """
+        Initialize new repository at the specified location.
+
+        The location does not have to be an existing directory. It will be
+        created on demand. Typically it should be instantiated with the default
+        location.
+        """
+        if location is None:
+            location = self.get_default_location()
+        self._location = location
+
+    @property
+    def location(self):
+        """
+        pathname of the repository
+        """
+        return self._location
+
+    def get_last_storage(self):
+        """
+        Find the last session storage object created in this repository.
+
+        :returns:
+            SessionStorage object associated with the last session created in
+            this repository using legacy mode.
+
+        .. note::
+            This will only return storage objects that were created using
+            legacy mode. Nonlegacy storage objects will not be returned this
+            way.
+        """
+        pathname = os.path.join(self.location, self._LAST_SESSION_SYMLINK)
+        try:
+            last_storage = os.readlink(pathname)
+        except OSError:
+            # The symlink can be gone or not be a real symlink
+            # in that case just ignore it and return None
+            return None
+        else:
+            # The link may be relative so let's ensure we know the full
+            # pathname for the subsequent check (which may be performed
+            # from another directory)
+            last_storage = os.path.join(self._location, last_storage)
+        # If the link points to a directory, assume it's okay
+        if os.path.isdir(last_storage):
+            return SessionStorage(last_storage)
+
+    def get_storage_list(self):
+        """
+        Enumerate stored sessions in the repository.
+
+        If the repository directory is not present then an empty list is
+        returned.
+
+        :returns:
+            list of :class:`SessionStorage` representing discovered sessions
+        """
+        logger.debug("Enumerating sessions in %s", self._location)
+        try:
+            # Try to enumerate the directory
+            item_list = os.listdir(self._location)
+        except OSError as exc:
+            # If the directory does not exist,
+            # silently return empty collection
+            if exc.errno == errno.ENOENT:
+                return []
+            # Don't silence any other errors
+            raise
+        session_list = []
+        # Check each item by looking for directories
+        for item in item_list:
+            pathname = os.path.join(self.location, item)
+            # Make sure not to follow any symlinks here
+            stat_result = os.lstat(pathname)
+            # Consider directories only
+            if stat.S_ISDIR(stat_result.st_mode):
+                logger.debug("Found possible session in %r", pathname)
+                session = SessionStorage(pathname)
+                session_list.append(session)
+        # Return the full list
+        return session_list
+
+    def __iter__(self):
+        """
+        Same as :meth:`get_storage_list()`
+        """
+        return iter(self.get_storage_list())
+
+    @classmethod
+    def get_default_location(cls):
+        """
+        Compute the default location of the session state repository
+
+        :returns: ${XDG_CACHE_HOME:-$HOME/.cache}/plainbox/sessions
+        """
+        # Pick XDG_CACHE_HOME from environment
+        xdg_cache_home = os.environ.get('XDG_CACHE_HOME')
+        # If not set or empty use the default ~/.cache/
+        if not xdg_cache_home:
+            xdg_cache_home = os.path.join(os.path.expanduser('~'), '.cache')
+        # Use a directory relative to XDG_CACHE_HOME
+        return os.path.join(xdg_cache_home, 'plainbox', 'sessions')
+
+
+class LockedStorageError(IOError):
+    """
+    Exception raised when SessionStorage.save_checkpoint() finds an existing
+    'next' file from a (presumably) previous call to save_checkpoint() that
+    got interrupted
+    """
+
+
+class SessionStorage:
+    """
+    Abstraction for storage area that is used by :class:`SessionState` to
+    keep some persistent and volatile data.
+
+    This class implements functions performing input/output operations
+    on session checkpoint data. The location property can be used for keeping
+    any additional files or directories but keep in mind that they will
+    be removed by :meth:`SessionStorage.remove()`
+
+    This class indirectly collaborates with :class:`SessionSuspendHelper` and
+    :class:`SessionResumeHelper`.
+    """
+
+    _SESSION_FILE = 'session'
+
+    _SESSION_FILE_NEXT = 'session.next'
+
+    def __init__(self, location):
+        """
+        Initialize a :class:`SessionStorage` with the given location.
+
+        The location is not created. If you want to ensure that it exists
+        call :meth:`create()` instead.
+        """
+        self._location = location
+
+    def __repr__(self):
+        return "<{} location:{!r}>".format(
+            self.__class__.__name__, self.location)
+
+    @property
+    def location(self):
+        """
+        location of the session storage
+        """
+        return self._location
+
+    @classmethod
+    def create(cls, base_dir, legacy_mode=True):
+        """
+        Create a new :class:`SessionStorage` in a random subdirectory
+        of the specified base directory. The base directory is also
+        created if necessary.
+
+        :param base_dir:
+            Directory in which a random session directory will be created.
+            Typically the base directory should be obtained from
+            :meth:`SessionStorageRepository.get_default_location()`
+
+        :param legacy_mode:
+            If False (defaults to True) then the caller is expected to
+            handle multiple sessions by itself.
+
+        .. note::
+            Legacy mode is where applications using PlainBox API can only
+            handle one session. Creating another session replaces whatever was
+            stored before. In non-legacy mode applications can enumerate
+            sessions, create arbitrary number of sessions at the same time
+            and remove sessions once they are no longer necessary.
+
+            Legacy mode is implemented with a symbolic link called
+            'last-session' that keeps track of the last session created using
+            ``legacy_mode=True``. When a new legacy-mode session is created
+            the target of that symlink is read and recursively removed.
+        """
+        if not os.path.exists(base_dir):
+            os.makedirs(base_dir)
+        location = tempfile.mkdtemp(
+            prefix='pbox-', suffix='.session', dir=base_dir)
+        logger.debug("Created new storage in %r", location)
+        self = cls(location)
+        if legacy_mode:
+            self._replace_legacy_session(base_dir)
+        return self
+
+    def _replace_legacy_session(self, base_dir):
+        """
+        Remove the previous legacy session and update the 'last-session'
+        symlink so that it points to this session storage directory.
+        """
+        symlink_pathname = os.path.join(
+            base_dir, SessionStorageRepository._LAST_SESSION_SYMLINK)
+        # Try to read and remove the storage referenced to by last-session
+        # symlink. This can fail if the link file is gone (which is harmless)
+        # or when it is not an actual symlink (which means that the
+        # repository is corrupted).
+        try:
+            symlink_target = os.readlink(symlink_pathname)
+        except OSError as exc:
+            if exc.errno == errno.ENOENT:
+                pass
+            elif exc.errno == errno.EINVAL:
+                logger.warning(
+                    "%r is not a symlink, repository %r must be corrupted",
+                    symlink_pathname, base_dir)
+            else:
+                logger.warning(
+                    "Unable to read symlink target from %r: %r",
+                    symlink_pathname, exc)
+        else:
+            logger.debug(
+                "Removing storage associated with last session %r",
+                symlink_target)
+            shutil.rmtree(symlink_target)
+            # Remove the last-session symlink itself
+            logger.debug(
+                "Removing symlink associated with last session: %r",
+                symlink_pathname)
+            os.unlink(symlink_pathname)
+        finally:
+            # Finally put the last-session symlink that points to this storage
+            logger.debug("Linking storage %r to last session", self.location)
+            try:
+                os.symlink(self.location, symlink_pathname)
+            except OSError as exc:
+                logger.error(
+                    "Cannot link %r as %r: %r",
+                    self.location, symlink_pathname, exc)
+
+    def remove(self):
+        """
+        Remove all filesystem entries associated with this instance.
+        """
+        logger.debug("Removing session storage from %r", self._location)
+        shutil.rmtree(self._location)
+
+    def load_checkpoint(self):
+        """
+        Load checkpoint data from the filesystem
+
+        :returns: data from the most recent checkpoint
+        :rtype: bytes
+
+        :raises IOError, OSError:
+            on various problems related to accessing the filesystem
+
+        :raises NotImplementedError:
+            when openat(2) is not available
+        """
+        if sys.version_info[0:2] >= (3, 3):
+            return self._load_checkpoint_unix_py33()
+        else:
+            return self._load_checkpoint_unix_py32()
+
+    def save_checkpoint(self, data):
+        """
+        Save checkpoint data to the filesystem.
+
+        The directory associated with this :class:`SessionStorage` must already
+        exist. Typically the instance should be obtained by calling
+        :meth:`SessionStorage.create()` which will ensure that this is already
+        the case.
+
+        :raises TypeError:
+            if data is not a bytes object.
+
+        :raises LockedStorageError:
+            if leftovers from previous save_checkpoint() have been detected.
+            Normally those should never be here but in certain cases that is
+            possible. Callers might want to call :meth:`break_lock()`
+            to resolve the problem and try again.
+
+        :raises IOError, OSError:
+            on various problems related to accessing the filesystem.
+            Typically permission errors may be reported here.
+
+        :raises NotImplementedError:
+            when openat(2), renameat(2), unlinkat(2) are not available on this
+            platform. Should never happen on Linux.
+        """
+        if sys.version_info[0:2] >= (3, 3):
+            return self._save_checkpoint_unix_py33(data)
+        else:
+            return self._save_checkpoint_unix_py32(data)
+
+    def break_lock(self):
+        """
+        Forcibly unlock the storage by removing a file created during
+        atomic filesystem operations of save_checkpoint().
+
+        This method might be useful if save_checkpoint()
+        raises LockedStorageError. It removes the "next" file that is used
+        for atomic rename.
+        """
+        _next_session_pathname = os.path.join(
+            self._location, self._SESSION_FILE_NEXT)
+        logger.debug(
+            "Forcibly unlinking 'next' file %r:", _next_session_pathname)
+        os.unlink(_next_session_pathname)
+
+    def _load_checkpoint_unix_py32(self):
+        _session_pathname = os.path.join(self._location, self._SESSION_FILE)
+        # Open the location directory
+        location_fd = os.open(self._location, os.O_DIRECTORY)
+        logger.debug(
+            "Opened session directory %r as descriptor %d",
+            self._location, location_fd)
+        try:
+            # Open the current session file in the location directory
+            session_fd = os.open(_session_pathname, os.O_RDONLY)
+            logger.debug(
+                "Opened session state file %r as descriptor %d",
+                _session_pathname, session_fd)
+            # Stat the file to know how much to read
+            session_stat = os.fstat(session_fd)
+            logger.debug(
+                "Stat'ed session state file: %s", session_stat)
+            try:
+                # Read session data
+                logger.debug(
+                    "Reading %d bytes of session state", session_stat.st_size)
+                data = os.read(session_fd, session_stat.st_size)
+                logger.debug("Read %d bytes of session state", len(data))
+                if len(data) != session_stat.st_size:
+                    raise IOError("partial read?")
+            finally:
+                # Close the session file
+                logger.debug("Closed descriptor %d", session_fd)
+                os.close(session_fd)
+        finally:
+            # Close the location directory
+            logger.debug("Closed descriptor %d", location_fd)
+            os.close(location_fd)
+        return data
+
+    def _load_checkpoint_unix_py33(self):
+        # Open the location directory
+        location_fd = os.open(self._location, os.O_DIRECTORY)
+        try:
+            # Open the current session file in the location directory
+            session_fd = os.open(
+                self._SESSION_FILE, os.O_RDONLY, dir_fd=location_fd)
+            # Stat the file to know how much to read
+            session_stat = os.fstat(session_fd)
+            try:
+                # Read session data
+                data = os.read(session_fd, session_stat.st_size)
+                if len(data) != session_stat.st_size:
+                    raise IOError("partial read?")
+            finally:
+                # Close the session file
+                os.close(session_fd)
+        finally:
+            # Close the location directory
+            os.close(location_fd)
+        return data
+
+    def _save_checkpoint_unix_py32(self, data):
+        # NOTE: this is like _save_checkpoint_py33 but without all the
+        # *at() functions (openat, renameat)
+        #
+        # Since we cannot use those functions there is an implicit race condition
+        # on all open() calls with another process that renames any of
+        # the directories that are part of the opened path.
+        #
+        # I don't think we can really do anything about this in userspace
+        # so this, python 3.2 specific version, just does the best effort
+        # implementation. Some of the comments were redacted but
+        # keep in mind that the rename race is always there.
+        if not isinstance(data, bytes):
+            raise TypeError("data must be bytes")
+        logger.debug(
+            "Saving %d bytes of data (UNIX, python 3.2 or older)", len(data))
+        # Helper pathnames, needed because we don't have *at functions
+        _next_session_pathname = os.path.join(
+            self._location, self._SESSION_FILE_NEXT)
+        _session_pathname = os.path.join(self._location, self._SESSION_FILE)
+        # Open the location directory, we need to fsync that later
+        # XXX: this may fail, maybe we should keep the fd open all the time?
+        location_fd = os.open(self._location, os.O_DIRECTORY)
+        logger.debug("Opened %r as descriptor %d", self._location, location_fd)
+        try:
+            # Open the "next" file in the location_directory
+            #
+            # Use "write" + "create" + "exclusive" flags so that no race
+            # condition is possible.
+            #
+            # This will never return -1, it throws IOError when anything is
+            # wrong. The caller has to catch this.
+            #
+            # As a special exception, this code handles EEXISTS and converts
+            # that to LockedStorageError that can be especially handled by
+            # some layer above.
+            try:
+                next_session_fd = os.open(
+                    _next_session_pathname,
+                    os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
+            except OSError as exc:
+                if exc.errno == errno.EEXIST:
+                    raise LockedStorageError()
+                else:
+                    raise
+            logger.debug(
+                "Opened next session file %s as descriptor %d",
+                _next_session_pathname, next_session_fd)
+            try:
+                # Write session data to disk
+                #
+                # I cannot find conclusive evidence but it seems that
+                # os.write() handles partial writes internally. In case we do
+                # get a partial write _or_ we run out of disk space, raise an
+                # explicit IOError.
+                num_written = os.write(next_session_fd, data)
+                logger.debug(
+                    "Wrote %d bytes of data to descriptor %d",
+                    num_written, next_session_fd)
+                if num_written != len(data):
+                    raise IOError("partial write?")
+            except:
+                logger.warning("Unlinking %r", _next_session_pathname)
+                os.unlink(_next_session_pathname)
+                raise  # anything went wrong: remove the next file, re-raise
+            else:
+                # If the write was successful we must flush kernel buffers.
+                #
+                # We want to be sure this data is really on disk by now as we
+                # may crash the machine soon after this method exits.
+                logger.debug(
+                    "Calling fsync() on descriptor %d", next_session_fd)
+                os.fsync(next_session_fd)
+            finally:
+                # Close the new session file
+                logger.debug("Closing descriptor %d", next_session_fd)
+                os.close(next_session_fd)
+            # Rename FILE_NEXT over FILE.
+            logger.debug(
+                "Renaming %r to %r", _next_session_pathname, _session_pathname)
+            try:
+                os.rename(_next_session_pathname, _session_pathname)
+            except:
+                # Same as above: unlink the next file so later attempts can
+                # open() it with O_EXCL, then re-raise the failure.
+                logger.warning("Unlinking %r", _next_session_pathname)
+                os.unlink(_next_session_pathname)
+                raise
+            # Flush kernel buffers on the directory.
+            #
+            # This should ensure the rename operation is really on disk by now.
+            # As noted above, this is essential for being able to survive
+            # system crash immediately after exiting this method.
+            logger.debug("Calling fsync() on descriptor %d", location_fd)
+            os.fsync(location_fd)
+        finally:
+            # Close the location directory
+            logger.debug("Closing descriptor %d", location_fd)
+            os.close(location_fd)
+
+    def _save_checkpoint_unix_py33(self, data):
+        if not isinstance(data, bytes):
+            raise TypeError("data must be bytes")
+        logger.debug(
+            "Saving %d bytes of data (UNIX, python 3.2 or older)", len(data))
+        # Open the location directory, we need to fsync that later
+        # XXX: this may fail, maybe we should keep the fd open all the time?
+        location_fd = os.open(self._location, os.O_DIRECTORY)
+        logger.debug("Opened %r as descriptor %d", self._location, location_fd)
+        try:
+            # Open the "next" file in the location_directory
+            #
+            # Use openat(2) to ensure we always open a file relative to the
+            # directory we already opened above. This is essential for fsync(2)
+            # calls made below.
+            #
+            # Use "write" + "create" + "exclusive" flags so that no race
+            # condition is possible.
+            #
+            # This will never return -1, it throws IOError when anything is
+            # wrong. The caller has to catch this.
+            #
+            # As a special exception, this code handles EEXIST
+            # (FileExistsError) and converts that to LockedStorageError
+            # that can be especially handled by some layer above.
+            try:
+                next_session_fd = os.open(
+                    self._SESSION_FILE_NEXT,
+                    os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644,
+                    dir_fd=location_fd)
+            except FileExistsError:
+                raise LockedStorageError()
+            logger.debug(
+                "Opened next session file %s as descriptor %d",
+                self._SESSION_FILE_NEXT, next_session_fd)
+            try:
+                # Write session data to disk
+                #
+                # I cannot find conclusive evidence but it seems that
+                # os.write() handles partial writes internally. In case we do
+                # get a partial write _or_ we run out of disk space, raise an
+                # explicit IOError.
+                num_written = os.write(next_session_fd, data)
+                logger.debug(
+                    "Wrote %d bytes of data to descriptor %d",
+                    num_written, next_session_fd)
+                if num_written != len(data):
+                    raise IOError("partial write?")
+            except:
+                # If anything goes wrong unlink the next file (unlinkat via
+                # dir_fd prevents races, as with open() above) and re-raise.
+                logger.warning("Unlinking %r", self._SESSION_FILE_NEXT)
+                os.unlink(self._SESSION_FILE_NEXT, dir_fd=location_fd)
+                raise
+            else:
+                # If the write was successful we must flush kernel buffers.
+                #
+                # We want to be sure this data is really on disk by now as we
+                # may crash the machine soon after this method exits.
+                logger.debug(
+                    "Calling fsync() on descriptor %d", next_session_fd)
+                os.fsync(next_session_fd)
+            finally:
+                # Close the new session file
+                logger.debug("Closing descriptor %d", next_session_fd)
+                os.close(next_session_fd)
+            # Rename FILE_NEXT over FILE.
+            #
+            # Use renameat(2) to ensure that there is no race condition if the
+            # location (directory) is being moved
+            logger.debug(
+                "Renaming %r to %r",
+                self._SESSION_FILE_NEXT, self._SESSION_FILE)
+            try:
+                os.rename(self._SESSION_FILE_NEXT, self._SESSION_FILE,
+                          src_dir_fd=location_fd, dst_dir_fd=location_fd)
+            except:
+                # Same as above: unlink the next file so later attempts can
+                # open() it with O_EXCL, then re-raise the failure.
+                logger.warning("Unlinking %r", self._SESSION_FILE_NEXT)
+                os.unlink(self._SESSION_FILE_NEXT, dir_fd=location_fd)
+                raise
+            # Flush kernel buffers on the directory.
+            #
+            # This should ensure the rename operation is really on disk by now.
+            # As noted above, this is essential for being able to survive
+            # system crash immediately after exiting this method.
+            logger.debug("Calling fsync() on descriptor %d", location_fd)
+            os.fsync(location_fd)
+        finally:
+            # Close the location directory
+            logger.debug("Closing descriptor %d", location_fd)
+            os.close(location_fd)

=== added file 'plainbox/plainbox/impl/session/suspend.py'
--- plainbox/plainbox/impl/session/suspend.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/suspend.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,321 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session.suspend` -- session suspend support
+===============================================================
+
+This module contains classes that can suspend an instance of
+:class:`~plainbox.impl.session.state.SessionState`. The general idea is that
+:class:`~plainbox.impl.session.suspend.SessionSuspendHelper` knows how to
+describe the session and
+:class:`~plainbox.impl.session.resume.SessionResumeHelper` knows how to
+recreate the session from that description.
+
+Both of the helper classes are only used by
+:class:`~plainbox.impl.session.manager.SessionManager` and in the
+legacy suspend/resume code paths of
+:class:`~plainbox.impl.session.state._LegacySessionState`.
+Applications should use one of those APIs to work with session snapshots.
+
+The design of the on-disk format is not like typical pickle or raw dump of all
+of the objects. Instead it is designed to create a smart representation of a
+subset of the data and explicitly support migrations, so that some future
+version of PlainBox can change the format and still read old sessions (to the
+extent that it makes sense) or at least reject them with an intelligent
+message.
+
+One important consideration of the format is that we suspend very often and
+resume very infrequently so everything is optimized around saving big
+chunks of data incrementally (all the big job results and their log files)
+and to keep most of the data we save over and over small.
+
+The key limitation in how the suspend code works is that we cannot really
+serialize jobs at all. There are two reasons for that, one very obvious
+and one which is more of a design decision.
+
+The basic reason for why we cannot serialize jobs is that we cannot really,
+meaningfully serialize the code that runs inside a job. That may be the shell
+command or a call into a python module. Without this limitation we would
+be basically pretending that we are running the same job as before while the
+job definition has transparently changed and the results would not be
+sensible anymore.
+
+The design decision is to allow abstract, opaque Providers to offer various
+types of JobDefinitions (that may be radically different to what current
+CheckBox jobs look like). This is why the resume interface requires one to
+provide a full list of job definitions to resume. This is also why the
+get_checksum() method can be implemented differently in non-CheckBox jobs.
+
+As an exception to this rule we _do_ serialize generated jobs. Those are a
+compromise between ease-of-use of the framework and the external
+considerations mentioned above. Generated jobs are re-created from whatever
+results that created them. The framework has special support code for knowing
+how to resume in light of the fact that some jobs might be generated during
+the resume process itself.
+"""
+
+import gzip
+import json
+import logging
+import base64
+
+from plainbox.impl.result import DiskJobResult
+from plainbox.impl.result import MemoryJobResult
+
+logger = logging.getLogger("plainbox.session.suspend")
+
+
+class SessionSuspendHelper:
+    """
+    Helper class for computing binary representation of a session.
+
+    The helper only creates a bytes object to save. Actual saving should
+    be performed using some other means, preferably using
+    :class:`~plainbox.impl.session.storage.SessionStorage`.
+    """
+
+    def suspend(self, session):
+        """
+        Compute the data that is saved by :class:`SessionStorage` as a
+        part of :meth:`SessionStorage.save_checkpoint()`.
+
+        :returns bytes: the serialized data
+        """
+        json_repr = self._json_repr(session)
+        data = json.dumps(
+            json_repr,
+            ensure_ascii=False,
+            sort_keys=True,
+            indent=None,
+            separators=(',', ':')
+        ).encode("UTF-8")
+        # NOTE: gzip.compress is not deterministic on python3.2
+        return gzip.compress(data)
+
+    def _json_repr(self, session):
+        """
+        Compute the representation of all of the data that needs to be saved.
+
+        :returns:
+            JSON-friendly representation
+        :rtype:
+            dict
+
+        The dictionary has the following keys:
+
+            ``version``
+                An integral number describing the version of the representation.
+                Currently only the first (1) version is defined.
+
+            ``session``
+                Representation of the session as computed by
+                :meth:`_repr_SessionState()`
+        """
+        return {
+            "version": 1,
+            "session": self._repr_SessionState(session),
+        }
+
+    def _repr_SessionState(self, obj):
+        """
+        Compute the representation of :class:`SessionState`
+
+        :returns:
+            JSON-friendly representation
+        :rtype:
+            dict
+
+        The result is a dictionary with the following items:
+
+            ``jobs``:
+                Dictionary mapping job name to job checksum.
+                The checksum is computed with
+                :meth:`~plainbox.impl.job.JobDefinition.get_checksum()`
+
+            ``results``
+                Dictionary mapping job name to a list of results.
+                Each result is represented by data computed by
+                :meth:`_repr_JobResult()`
+
+            ``desired_job_list``:
+                List of (names) of jobs that are desired (to be executed)
+
+            ``metadata``:
+                The representation of meta-data associated with the session
+                state object. This is encoded as a dictionary
+                with the following items:
+
+                    ``title``:
+                        Title of the session. Arbitrary text provided by
+                        the application.
+
+                    ``flags``:
+                        List of strings that enumerate the flags the session
+                        is in. There are some well-known flags but this list
+                        can have any items in it.
+
+                    ``running_job_name``:
+                        Name of the job that was about to be executed before
+                        snapshotting took place. Can be None.
+        """
+        return {
+            "jobs": {
+                state.job.name: state.job.get_checksum()
+                for state in obj.job_state_map.values()
+            },
+            "results": {
+                # Currently we store only one result but we may store
+                # more than that in a later version.
+                state.job.name: [self._repr_JobResult(state.result)]
+                for state in obj.job_state_map.values()
+            },
+            "desired_job_list": [
+                job.name for job in obj.desired_job_list
+            ],
+            "metadata": self._repr_SessionMetaData(obj.metadata),
+        }
+
+    def _repr_SessionMetaData(self, obj):
+        """
+        Compute the representation of :class:`SessionMetaData`.
+
+        :returns:
+            JSON-friendly representation.
+        :rtype:
+            dict
+        """
+        return {
+            "title": obj.title,
+            "flags": list(sorted(obj.flags)),
+            "running_job_name": obj.running_job_name
+        }
+
+    def _repr_JobResult(self, obj):
+        """
+        Compute the representation of one of IJobResult subclasses
+        """
+        if isinstance(obj, DiskJobResult):
+            return self._repr_DiskJobResult(obj)
+        elif isinstance(obj, MemoryJobResult):
+            return self._repr_MemoryJobResult(obj)
+        else:
+            raise TypeError(
+                "_repr_JobResult() supports DiskJobResult or MemoryJobResult")
+
+    def _repr_JobResultBase(self, obj):
+        """
+        Compute the representation of :class:`plainbox.impl.job._JobResultBase`
+
+        :returns:
+            JSON-friendly representation
+        :rtype:
+            dict
+
+        The dictionary has the following keys:
+
+            ``outcome``
+                The outcome of the test
+
+            ``execution_duration``
+                Time it took to execute the test command in seconds
+
+            ``comments``
+                Tester-supplied comments
+
+            ``return_code``
+                The exit code of the application.
+
+        .. note::
+            return_code can have unexpected values when the process was killed
+            by a signal
+        """
+        return {
+            "outcome": obj.outcome,
+            "execution_duration": obj.execution_duration,
+            "comments": obj.comments,
+            "return_code": obj.return_code,
+        }
+
+    def _repr_MemoryJobResult(self, obj):
+        """
+        Compute the representation of
+        :class:`plainbox.impl.job.MemoryJobResult`
+
+        :returns:
+            JSON-friendly representation
+        :rtype:
+            dict
+
+        The dictionary has the following keys *in addition to* what is
+        produced by :meth:`_repr_JobResultBase()`:
+
+            ``io_log``
+                Representation of the list of IO Log records
+        """
+        assert isinstance(obj, MemoryJobResult)
+        result = self._repr_JobResultBase(obj)
+        result.update({
+            "io_log": [self._repr_IOLogRecord(record)
+                       for record in obj.io_log],
+        })
+        return result
+
+    def _repr_DiskJobResult(self, obj):
+        """
+        Compute the representation of :class:`plainbox.impl.job.DiskJobResult`
+
+        :returns:
+            JSON-friendly representation
+        :rtype:
+            dict
+
+        The dictionary has the following keys *in addition to* what is
+        produced by :meth:`_repr_JobResultBase()`:
+
+            ``io_log_filename``
+                The name of the file that keeps the serialized IO log
+        """
+        assert isinstance(obj, DiskJobResult)
+        result = self._repr_JobResultBase(obj)
+        result.update({
+            "io_log_filename": obj.io_log_filename,
+        })
+        return result
+
+    def _repr_IOLogRecord(self, obj):
+        """
+        Compute the representation of
+        :class:`plainbox.impl.result.IOLogRecord`
+
+        :returns:
+            JSON-friendly representation
+        :rtype:
+            list
+
+        The list has three elements:
+
+        * delay, copied from :attr:`~plainbox.impl.result.IOLogRecord.delay`
+        * stream name, copied from
+          :attr:`~plainbox.impl.result.IOLogRecord.stream_name`
+        * data, base64 encoded ASCII string, computed from
+          :attr:`~plainbox.impl.result.IOLogRecord.data`
+        """
+        return [obj[0], obj[1],
+                base64.standard_b64encode(obj[2]).decode("ASCII")]

=== added file 'plainbox/plainbox/impl/session/test_jobs.py'
--- plainbox/plainbox/impl/session/test_jobs.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/test_jobs.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,218 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+plainbox.impl.test_session
+==========================
+
+Test definitions for plainbox.impl.session module
+"""
+
+import json
+
+from unittest import TestCase, expectedFailure
+
+from plainbox.abc import IJobResult
+from plainbox.impl.result import MemoryJobResult
+from plainbox.impl.session import JobReadinessInhibitor
+from plainbox.impl.session import JobState
+from plainbox.impl.session import UndesiredJobReadinessInhibitor
+from plainbox.impl.testing_utils import make_job, make_job_result
+
+
+class JobReadinessInhibitorTests(TestCase):
+
+    def test_bad_initialization(self):
+        self.assertRaises(ValueError, JobReadinessInhibitor,
+                          JobReadinessInhibitor.UNDESIRED - 1)
+        self.assertRaises(ValueError, JobReadinessInhibitor,
+                          JobReadinessInhibitor.FAILED_RESOURCE + 1)
+        self.assertRaises(ValueError, JobReadinessInhibitor,
+                          JobReadinessInhibitor.PENDING_DEP)
+        self.assertRaises(ValueError, JobReadinessInhibitor,
+                          JobReadinessInhibitor.FAILED_DEP)
+        self.assertRaises(ValueError, JobReadinessInhibitor,
+                          JobReadinessInhibitor.PENDING_RESOURCE)
+        self.assertRaises(ValueError, JobReadinessInhibitor,
+                          JobReadinessInhibitor.FAILED_RESOURCE)
+        job = make_job("A")
+        self.assertRaises(ValueError, JobReadinessInhibitor,
+                          JobReadinessInhibitor.PENDING_RESOURCE, job)
+        self.assertRaises(ValueError, JobReadinessInhibitor,
+                          JobReadinessInhibitor.FAILED_RESOURCE, job)
+
+    def test_unknown(self):
+        obj = JobReadinessInhibitor(JobReadinessInhibitor.UNDESIRED)
+        self.assertEqual(
+            repr(obj), (
+                "<JobReadinessInhibitor cause:UNDESIRED"
+                " related_job:None"
+                " related_expression:None>"))
+        self.assertEqual(str(obj), "undesired")
+
+    def test_pending_dep(self):
+        job = make_job("A")
+        obj = JobReadinessInhibitor(
+            JobReadinessInhibitor.PENDING_DEP, related_job=job)
+        self.assertEqual(
+            repr(obj), (
+                "<JobReadinessInhibitor cause:PENDING_DEP"
+                " related_job:<JobDefinition name:'A' plugin:'dummy'>"
+                " related_expression:None>"))
+        self.assertEqual(str(obj), "required dependency 'A' did not run yet")
+
+    def test_failed_dep(self):
+        job = make_job("A")
+        obj = JobReadinessInhibitor(
+            JobReadinessInhibitor.FAILED_DEP, related_job=job)
+        self.assertEqual(
+            repr(obj), (
+                "<JobReadinessInhibitor cause:FAILED_DEP"
+                " related_job:<JobDefinition name:'A' plugin:'dummy'>"
+                " related_expression:None>"))
+        self.assertEqual(str(obj), "required dependency 'A' has failed")
+
+    def test_pending_resource(self):
+        job = make_job("A", requires="resource.attr == 'value'")
+        expr = job.get_resource_program().expression_list[0]
+        obj = JobReadinessInhibitor(
+            JobReadinessInhibitor.PENDING_RESOURCE, related_job=job,
+            related_expression=expr)
+        self.assertEqual(
+            repr(obj), (
+                "<JobReadinessInhibitor cause:PENDING_RESOURCE"
+                " related_job:<JobDefinition name:'A' plugin:'dummy'>"
+                " related_expression:"
+                "<ResourceExpression text:\"resource.attr == 'value'\">>"))
+        self.assertEqual(
+            str(obj), (
+                "resource expression \"resource.attr == 'value'\" could not be"
+                " evaluated because the resource it depends on did not run"
+                " yet"))
+
+    def test_failed_resource(self):
+        job = make_job("A", requires="resource.attr == 'value'")
+        expr = job.get_resource_program().expression_list[0]
+        obj = JobReadinessInhibitor(
+            JobReadinessInhibitor.FAILED_RESOURCE, related_job=job,
+            related_expression=expr)
+        self.assertEqual(
+            repr(obj), (
+                "<JobReadinessInhibitor cause:FAILED_RESOURCE"
+                " related_job:<JobDefinition name:'A' plugin:'dummy'>"
+                " related_expression:"
+                "<ResourceExpression text:\"resource.attr == 'value'\">>"))
+        self.assertEqual(
+            str(obj), (
+                "resource expression \"resource.attr == 'value'\""
+                " evaluates to false"))
+
+    def test_unknown_global(self):
+        self.assertEqual(UndesiredJobReadinessInhibitor.cause,
+                         JobReadinessInhibitor.UNDESIRED)
+
+
+class JobStateTests(TestCase):
+
+    def setUp(self):
+        self.job = make_job("A")
+        self.job_state = JobState(self.job)
+
+    def test_smoke(self):
+        self.assertIsNotNone(self.job_state.result)
+        self.assertIs(self.job_state.result.outcome, IJobResult.OUTCOME_NONE)
+        self.assertEqual(self.job_state.readiness_inhibitor_list, [
+            UndesiredJobReadinessInhibitor])
+
+    def test_getting_job(self):
+        self.assertIs(self.job_state.job, self.job)
+
+    @expectedFailure
+    def test_setting_job_is_not_allowed(self):
+        #FIXME: We want this test to come back at some point so I didn't
+        #delete it, but at the moment we need it to always pass because
+        #a JobState's job attribute needs to be writable.
+        with self.assertRaises(AttributeError):
+            self.job_state.job = None
+
+    def test_setting_result(self):
+        result = make_job_result()
+        self.job_state.result = result
+        self.assertIs(self.job_state.result, result)
+
+    def test_setting_result_fires_signal(self):
+        """
+        verify that assigning state.result fires the on_result_changed signal
+        """
+        # Remember both new and old result for verification
+        new_result = make_job_result()
+        old_result = self.job_state.result
+
+        def changed_callback(old, new):
+            # Verify that new and old are correct and not swapped
+            self.assertIs(new, new_result)
+            self.assertIs(old, old_result)
+            # Set a flag that we verify below in case this never gets called
+            self.on_changed_fired = True
+        # Connect the signal handler
+        self.job_state.on_result_changed.connect(changed_callback)
+        # Assign the new result
+        self.job_state.result = new_result
+        # Ensure that the signal was fired and called our callback
+        self.assertTrue(self.on_changed_fired)
+
+    def test_setting_result_fires_signal_only_when_real_change_happens(self):
+        """
+        verify that assigning state.result does NOT fire the signal when the
+        new result is the same
+        """
+        # Assume we never get called and reset the flag
+        self.on_changed_fired = False
+
+        def changed_callback(old, new):
+            # Set the flag in case we do get called
+            self.on_changed_fired = True
+        # Connect the signal handler
+        self.job_state.on_result_changed.connect(changed_callback)
+        # Assign the same result again
+        self.job_state.result = self.job_state.result
+        # Ensure that the signal was NOT fired
+        self.assertFalse(self.on_changed_fired)
+
+    def test_setting_readiness_inhibitor_list(self):
+        inhibitor = JobReadinessInhibitor(JobReadinessInhibitor.UNDESIRED)
+        self.job_state.readiness_inhibitor_list = [inhibitor]
+        self.assertEqual(self.job_state.readiness_inhibitor_list, [inhibitor])
+
+    def test_can_start(self):
+        self.job_state.readiness_inhibitor_list = []
+        self.assertTrue(self.job_state.can_start())
+        self.job_state.readiness_inhibitor_list = [
+            UndesiredJobReadinessInhibitor]
+        self.assertFalse(self.job_state.can_start())
+
+    def test_readiness_description(self):
+        self.job_state.readiness_inhibitor_list = []
+        self.assertEqual(self.job_state.get_readiness_description(),
+                         "job can be started")
+        self.job_state.readiness_inhibitor_list = [
+            UndesiredJobReadinessInhibitor]
+        self.assertTrue(
+            self.job_state.get_readiness_description().startswith(
+                "job cannot be started: "))

=== added file 'plainbox/plainbox/impl/session/test_legacy.py'
--- plainbox/plainbox/impl/session/test_legacy.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/test_legacy.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,65 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+plainbox.impl.test_session
+==========================
+
+Test definitions for plainbox.impl.session module
+"""
+
+import json
+import os
+import tempfile
+import shutil
+
+from unittest import TestCase
+
+from plainbox.abc import IJobResult
+from plainbox.impl.result import MemoryJobResult
+from plainbox.impl.session.legacy import SessionStateLegacyAPICompatImpl
+from plainbox.impl.testing_utils import make_job
+
+
+class SessionStateSmokeTests(TestCase):
+
+    def setUp(self):
+        A = make_job('A', requires='R.attr == "value"')
+        B = make_job('B', depends='C')
+        C = make_job('C')
+        self.job_list = [A, B, C]
+        self.session_state = SessionStateLegacyAPICompatImpl(self.job_list)
+
+    def test_initial_job_list(self):
+        expected = self.job_list
+        observed = self.session_state.job_list
+        self.assertEqual(expected, observed)
+
+    def test_initial_desired_job_list(self):
+        expected = []
+        observed = self.session_state.desired_job_list
+        self.assertEqual(expected, observed)
+
+    def test_initial_run_list(self):
+        expected = []
+        observed = self.session_state.run_list
+        self.assertEqual(expected, observed)
+
+    def test_initial_session_dir(self):
+        self.assertIsNone(self.session_state.session_dir)

=== added file 'plainbox/plainbox/impl/session/test_resume.py'
--- plainbox/plainbox/impl/session/test_resume.py	1970-01-01 00:00:00 +0000
+++ plainbox/plainbox/impl/session/test_resume.py	2013-09-13 17:12:45 +0000
@@ -0,0 +1,1290 @@
+# This file is part of Checkbox.
+#
+# Copyright 2012, 2013 Canonical Ltd.
+# Written by:
+#   Zygmunt Krynicki <zygmunt.krynicki@xxxxxxxxxxxxx>
+#
+# Checkbox is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Checkbox is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Checkbox.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+:mod:`plainbox.impl.session.test_resume`
+========================================
+
+Test definitions for :mod:`plainbox.impl.session.resume` module
+"""
+
+from unittest import TestCase
+import base64
+import binascii
+import copy
+import gzip
+import json
+
+from plainbox.impl.job import JobDefinition
+from plainbox.impl.resource import Resource
+from plainbox.impl.result import DiskJobResult
+from plainbox.impl.result import IOLogRecord
+from plainbox.impl.result import MemoryJobResult
+from plainbox.impl.session.resume import CorruptedSessionError
+from plainbox.impl.session.resume import IncompatibleJobError
+from plainbox.impl.session.resume import IncompatibleSessionError
+from plainbox.impl.session.resume import SessionResumeError
+from plainbox.impl.session.resume import SessionResumeHelper
+from plainbox.impl.session.state import SessionState
+from plainbox.impl.testing_utils import make_job
+
+
+class SessionResumeExceptionTests(TestCase):
+
+    """
+    Tests for the various exceptions defined in the resume module
+    """
+
+    def test_resume_exception_inheritance(self):
+        """
+        verify that all three exception classes inherit from the common base
+        """
+        self.assertTrue(issubclass(
+            CorruptedSessionError, SessionResumeError))
+        self.assertTrue(issubclass(
+            IncompatibleSessionError, SessionResumeError))
+        self.assertTrue(issubclass(
+            IncompatibleJobError, SessionResumeError))
+
+
+class SessionResumeTests(TestCase):
+
+    """
+    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
+    """
+
+    def test_resume_garbage_gzip(self):
+        """
+        verify that CorruptedSessionError raised when we try to decompress
+        garbage bytes. By "garbage" we mean that it's not a valid
+        gzip-compressed stream. Internally IOError is raised but we wrap
+        that for simplicity.
+        """
+        data = b"foo"
+        with self.assertRaises(CorruptedSessionError) as boom:
+            SessionResumeHelper([]).resume(data)
+        self.assertIsInstance(boom.exception.__context__, IOError)
+
+    def test_resume_garbage_unicode(self):
+        """
+        verify that CorruptedSessionError is raised when we try to interpret
+        incorrect bytes as UTF-8. Internally UnicodeDecodeError is raised
+        but we wrap that for simplicity.
+        """
+        # This is just a sanity check that b"\xff" is not a valid UTF-8 string
+        with self.assertRaises(UnicodeDecodeError):
+            b"\xff".decode('UTF-8')
+        data = gzip.compress(b"\xff")
+        with self.assertRaises(CorruptedSessionError) as boom:
+            SessionResumeHelper([]).resume(data)
+        self.assertIsInstance(boom.exception.__context__, UnicodeDecodeError)
+
+    def test_resume_garbage_json(self):
+        """
+        verify that CorruptedSessionError is raised when we try to interpret
+        malformed JSON text. Internally ValueError is raised but we wrap that
+        for simplicity.
+        """
+        data = gzip.compress(b"{")
+        with self.assertRaises(CorruptedSessionError) as boom:
+            SessionResumeHelper([]).resume(data)
+        self.assertIsInstance(boom.exception.__context__, ValueError)
+
+
+class EndToEndTests(TestCase):
+
+    full_repr = {
+        'version': 1,
+        'session': {
+            'jobs': {
+                '__category__': (
+                    '5267192a5eac9288d144242d800b981eeca476c17e0'
+                    'dd32a09c4b3ea0a14f955'),
+                'generator': (
+                    '7e67e23b7e7a6a5803721a9f282c0e88c7f40bae470'
+                    '950f880e419bb9c7665d8'),
+                'generated': (
+                    'bfee8c57b6adc9f0f281b59fe818de2ed98b6affb78'
+                    '9cf4fbf282d89453190d3'),
+            },
+            'results': {
+                '__category__': [{
+                    'comments': None,
+                    'execution_duration': None,
+                    'io_log': [
+                        [0.0, 'stdout', 'cGx1Z2luOmxvY2FsCg=='],
+                        [0.1, 'stdout', 'bmFtZTpnZW5lcmF0b3IK']],
+                    'outcome': None,
+                    'return_code': None,
+                }],
+                'generator': [{
+                    'comments': None,
+                    'execution_duration': None,
+                    'io_log': [
+                        [0.0, 'stdout', 'bmFtZTpnZW5lcmF0ZWQ=']],
+                    'outcome': None,
+                    'return_code': None,
+                }],
+                'generated': [{
+                    'comments': None,
+                    'execution_duration': None,
+                    'io_log': [],
+                    'outcome': None,
+                    'return_code': None,
+                }]
+            },
+            'desired_job_list': ['__category__', 'generator'],
+            'metadata': {
+                'flags': [],
+                'running_job_name': None,
+                'title': None
+            },
+        }
+    }
+
+    def setUp(self):
+        # Crete a "__category__" job
+        self.category_job = JobDefinition({
+            "plugin": "local",
+            "name": "__category__"
+        })
+        # Create a "generator" job
+        self.generator_job = JobDefinition({
+            "plugin": "local",
+            "name": "generator"
+        })
+        # Keep a variable for the (future) generated job
+        self.generated_job = None
+        # Create a result for the "__category__" job.
+        # It must define a verbatim copy of the "generator" job
+        self.category_result = MemoryJobResult({
+            "io_log": [
+                (0.0, "stdout", b'plugin:local\n'),
+                (0.1, "stdout", b'name:generator\n'),
+            ]
+        })
+        # Create a result for the "generator" job.
+        # It will define the "generated" job
+        self.generator_result = MemoryJobResult({
+            "io_log": [(0.0, 'stdout', b'name:generated')]
+        })
+        self.job_list = [self.category_job, self.generator_job]
+        self.suspend_data = gzip.compress(
+            json.dumps(self.full_repr).encode("UTF-8"))
+
+    def test_resume_early_callback(self):
+        """
+        verify that early_cb is called with a session object
+        """
+        def early_cb(session):
+            self.seen_session = session
+        session = SessionResumeHelper(self.job_list).resume(
+            self.suspend_data, early_cb)
+        self.assertIs(session, self.seen_session)
+
+
class IOLogRecordResumeTests(TestCase):

    """
    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
    and how it handles resuming IOLogRecord objects
    """

    def _expect_corruption(self, record_repr):
        # Shared helper: building a record from record_repr must raise
        # CorruptedSessionError. The catcher is returned so callers can
        # inspect the chained (``__context__``) exception when needed.
        catcher = self.assertRaises(CorruptedSessionError)
        with catcher:
            SessionResumeHelper._build_IOLogRecord(record_repr)
        return catcher

    def test_build_IOLogRecord_missing_delay(self):
        """
        verify that _build_IOLogRecord() checks for missing ``delay``
        """
        self._expect_corruption([])

    def test_build_IOLogRecord_bad_type_for_delay(self):
        """
        verify that _build_IOLogRecord() checks that ``delay`` is float
        """
        self._expect_corruption([0, 'stdout', ''])

    def test_build_IOLogRecord_negative_delay(self):
        """
        verify that _build_IOLogRecord() checks for negative ``delay``
        """
        self._expect_corruption([-1.0, 'stdout', ''])

    def test_build_IOLogRecord_missing_stream_name(self):
        """
        verify that _build_IOLogRecord() checks for missing ``stream-name``
        """
        self._expect_corruption([0.0])

    def test_build_IOLogRecord_bad_type_stream_name(self):
        """
        verify that _build_IOLogRecord() checks that ``stream-name``
        is a string
        """
        self._expect_corruption([0.0, 1])

    def test_build_IOLogRecord_bad_value_stream_name(self):
        """
        verify that _build_IOLogRecord() checks that ``stream-name`` looks
        sane
        """
        self._expect_corruption([0.0, "foo", ""])

    def test_build_IOLogRecord_missing_data(self):
        """
        verify that _build_IOLogRecord() checks for missing ``data``
        """
        self._expect_corruption([0.0, 'stdout'])

    def test_build_IOLogRecord_non_ascii_data(self):
        """
        verify that _build_IOLogRecord() checks that ``data`` is ASCII
        """
        catcher = self._expect_corruption([0.0, 'stdout', '\uFFFD'])
        self.assertIsInstance(
            catcher.exception.__context__, UnicodeEncodeError)

    def test_build_IOLogRecord_non_base64_ascii_data(self):
        """
        verify that _build_IOLogRecord() checks that ``data`` is valid base64
        """
        catcher = self._expect_corruption([0.0, 'stdout', '==broken'])
        # base64.standard_b64decode() raises binascii.Error
        self.assertIsInstance(
            catcher.exception.__context__, binascii.Error)

    def test_build_IOLogRecord_values(self):
        """
        verify that _build_IOLogRecord() returns a proper IOLogRecord object
        with all the values in order
        """
        record = SessionResumeHelper._build_IOLogRecord(
            [1.5, 'stderr', 'dGhpcyB3b3Jrcw=='])
        self.assertAlmostEqual(record.delay, 1.5)
        self.assertEqual(record.stream_name, 'stderr')
        self.assertEqual(record.data, b"this works")
+
class JobResultResumeMixIn:

    """
    Mix-in class that defines most of the common tests for both
    MemoryJobResult and DiskJobResult.

    Sub-classes should define ``good_repr`` at class level
    """

    good_repr = None

    def _repr_without(self, key):
        # Shallow copy of good_repr with ``key`` removed
        obj_repr = dict(self.good_repr)
        del obj_repr[key]
        return obj_repr

    def _repr_with(self, key, value):
        # Shallow copy of good_repr with ``key`` replaced by ``value``
        obj_repr = dict(self.good_repr)
        obj_repr[key] = value
        return obj_repr

    def _expect_corruption(self, obj_repr, message):
        # _build_JobResult(obj_repr) must raise CorruptedSessionError
        # carrying exactly ``message``
        with self.assertRaises(CorruptedSessionError) as boom:
            SessionResumeHelper._build_JobResult(obj_repr)
        self.assertEqual(str(boom.exception), message)

    def test_build_JobResult_checks_for_missing_outcome(self):
        """
        verify that _build_JobResult() checks if ``outcome`` is present
        """
        self._expect_corruption(
            self._repr_without('outcome'),
            "Missing value for key 'outcome'")

    def test_build_JobResult_checks_type_of_outcome(self):
        """
        verify that _build_JobResult() checks if ``outcome`` is a string
        """
        self._expect_corruption(
            self._repr_with('outcome', 42),
            "Value of key 'outcome' is of incorrect type int")

    def test_build_JobResult_checks_value_of_outcome(self):
        """
        verify that _build_JobResult() checks if the value of ``outcome`` is
        in the set of known-good values.
        """
        self._expect_corruption(
            self._repr_with('outcome', 'maybe'),
            "Value for key 'outcome' not in allowed set [None, 'pass', "
            "'fail', 'skip', 'not-supported', 'not-implemented', "
            "'undecided']")

    def test_build_JobResult_allows_none_outcome(self):
        """
        verify that _build_JobResult() allows for the value of ``outcome`` to
        be None
        """
        obj = SessionResumeHelper._build_JobResult(
            self._repr_with('outcome', None))
        self.assertEqual(obj.outcome, None)

    def test_build_JobResult_restores_outcome(self):
        """
        verify that _build_JobResult() restores the value of ``outcome``
        """
        obj = SessionResumeHelper._build_JobResult(
            self._repr_with('outcome', 'fail'))
        self.assertEqual(obj.outcome, 'fail')

    def test_build_JobResult_checks_for_missing_comments(self):
        """
        verify that _build_JobResult() checks if ``comments`` is present
        """
        self._expect_corruption(
            self._repr_without('comments'),
            "Missing value for key 'comments'")

    def test_build_JobResult_checks_type_of_comments(self):
        """
        verify that _build_JobResult() checks if ``comments`` is a string
        """
        self._expect_corruption(
            self._repr_with('comments', False),
            "Value of key 'comments' is of incorrect type bool")

    def test_build_JobResult_allows_for_none_comments(self):
        """
        verify that _build_JobResult() allows for the value of ``comments``
        to be None
        """
        obj = SessionResumeHelper._build_JobResult(
            self._repr_with('comments', None))
        self.assertEqual(obj.comments, None)

    def test_build_JobResult_restores_comments(self):
        """
        verify that _build_JobResult() restores the value of ``comments``
        """
        obj = SessionResumeHelper._build_JobResult(
            self._repr_with('comments', 'this is a comment'))
        self.assertEqual(obj.comments, 'this is a comment')

    def test_build_JobResult_checks_for_missing_return_code(self):
        """
        verify that _build_JobResult() checks if ``return_code`` is present
        """
        self._expect_corruption(
            self._repr_without('return_code'),
            "Missing value for key 'return_code'")

    def test_build_JobResult_checks_type_of_return_code(self):
        """
        verify that _build_JobResult() checks if ``return_code`` is an integer
        """
        self._expect_corruption(
            self._repr_with('return_code', "text"),
            "Value of key 'return_code' is of incorrect type str")

    def test_build_JobResult_allows_for_none_return_code(self):
        """
        verify that _build_JobResult() allows for the value of ``return_code``
        to be None
        """
        obj = SessionResumeHelper._build_JobResult(
            self._repr_with('return_code', None))
        self.assertEqual(obj.return_code, None)

    def test_build_JobResult_restores_return_code(self):
        """
        verify that _build_JobResult() restores the value of ``return_code``
        """
        obj = SessionResumeHelper._build_JobResult(
            self._repr_with('return_code', 42))
        self.assertEqual(obj.return_code, 42)

    def test_build_JobResult_checks_for_missing_execution_duration(self):
        """
        verify that _build_JobResult() checks if ``execution_duration``
        is present
        """
        self._expect_corruption(
            self._repr_without('execution_duration'),
            "Missing value for key 'execution_duration'")

    def test_build_JobResult_checks_type_of_execution_duration(self):
        """
        verify that _build_JobResult() checks if ``execution_duration``
        is a float
        """
        self._expect_corruption(
            self._repr_with('execution_duration', "text"),
            "Value of key 'execution_duration' is of incorrect type str")

    def test_build_JobResult_allows_for_none_execution_duration(self):
        """
        verify that _build_JobResult() allows for the value of
        ``execution_duration`` to be None
        """
        obj = SessionResumeHelper._build_JobResult(
            self._repr_with('execution_duration', None))
        self.assertEqual(obj.execution_duration, None)

    def test_build_JobResult_restores_execution_duration(self):
        """
        verify that _build_JobResult() restores the value of
        ``execution_duration``
        """
        obj = SessionResumeHelper._build_JobResult(
            self._repr_with('execution_duration', 5.1))
        self.assertAlmostEqual(obj.execution_duration, 5.1)
+
class MemoryJobResultResumeTests(JobResultResumeMixIn, TestCase):

    """
    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
    and how it handles recreating MemoryJobResult from their representations
    """

    good_repr = {
        'outcome': "pass",
        'comments': None,
        'return_code': None,
        'execution_duration': None,
        'io_log': []
    }

    def test_build_JobResult_restores_MemoryJobResult_representations(self):
        # A representation carrying 'io_log' (and no 'io_log_filename')
        # selects the in-memory result class
        self.assertIsInstance(
            SessionResumeHelper._build_JobResult(self.good_repr),
            MemoryJobResult)

    def test_build_JobResult_checks_for_missing_io_log(self):
        """
        verify that _build_JobResult() checks if ``io_log`` is present
        """
        obj_repr = dict(self.good_repr)
        del obj_repr['io_log']
        with self.assertRaises(CorruptedSessionError) as boom:
            SessionResumeHelper._build_JobResult(obj_repr)
        self.assertEqual(
            str(boom.exception), "Missing value for key 'io_log'")

    def test_build_JobResult_checks_type_of_io_log(self):
        """
        verify that _build_JobResult() checks if ``io_log``
        is a list
        """
        obj_repr = dict(self.good_repr, io_log="text")
        with self.assertRaises(CorruptedSessionError) as boom:
            SessionResumeHelper._build_JobResult(obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'io_log' is of incorrect type str")

    def test_build_JobResult_checks_for_none_io_log(self):
        """
        verify that _build_JobResult() checks if the value of
        ``io_log`` is not None
        """
        obj_repr = dict(self.good_repr, io_log=None)
        with self.assertRaises(CorruptedSessionError) as boom:
            SessionResumeHelper._build_JobResult(obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'io_log' cannot be None")

    def test_build_JobResult_restores_io_log(self):
        """
        verify that _build_JobResult() checks if ``io_log``
        is restored for MemoryJobResult representations
        """
        obj_repr = dict(self.good_repr, io_log=[[0.0, 'stdout', '']])
        obj = SessionResumeHelper._build_JobResult(obj_repr)
        # NOTE: MemoryJobResult.io_log is a property that converts
        # whatever was stored to IOLogRecord and returns a _tuple_
        # so the original list is not visible
        self.assertEqual(obj.io_log, (IOLogRecord(0.0, 'stdout', b''),))
+
+
class DiskJobResultResumeTests(JobResultResumeMixIn, TestCase):

    """
    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
    and how it handles recreating DiskJobResult from their representations
    """

    good_repr = {
        'outcome': "pass",
        'comments': None,
        'return_code': None,
        'execution_duration': None,
        'io_log_filename': "file.txt"
    }

    def test_build_JobResult_restores_DiskJobResult_representations(self):
        # The presence of 'io_log_filename' selects the on-disk result class
        self.assertIsInstance(
            SessionResumeHelper._build_JobResult(self.good_repr),
            DiskJobResult)

    def test_build_JobResult_does_not_check_for_missing_io_log_filename(self):
        """
        verify that _build_JobResult() does not check if
        ``io_log_filename`` is present as that signifies that MemoryJobResult
        should be recreated instead
        """
        obj_repr = dict(self.good_repr)
        del obj_repr['io_log_filename']
        with self.assertRaises(CorruptedSessionError) as boom:
            SessionResumeHelper._build_JobResult(obj_repr)
        # NOTE: the error message explicitly talks about 'io_log', not
        # about 'io_log_filename' because we're hitting the other path
        # of the restore function
        self.assertEqual(
            str(boom.exception), "Missing value for key 'io_log'")

    def test_build_JobResult_checks_type_of_io_log_filename(self):
        """
        verify that _build_JobResult() checks if ``io_log_filename``
        is a string
        """
        obj_repr = dict(self.good_repr, io_log_filename=False)
        with self.assertRaises(CorruptedSessionError) as boom:
            SessionResumeHelper._build_JobResult(obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'io_log_filename' is of incorrect type bool")

    def test_build_JobResult_checks_for_none_io_log_filename(self):
        """
        verify that _build_JobResult() checks if the value of
        ``io_log_filename`` is not None
        """
        obj_repr = dict(self.good_repr, io_log_filename=None)
        with self.assertRaises(CorruptedSessionError) as boom:
            SessionResumeHelper._build_JobResult(obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'io_log_filename' cannot be None")

    def test_build_JobResult_restores_io_log_filename(self):
        """
        verify that _build_JobResult() restores the value of
        ``io_log_filename`` for DiskJobResult representations
        """
        obj_repr = dict(self.good_repr, io_log_filename="some-file.txt")
        obj = SessionResumeHelper._build_JobResult(obj_repr)
        self.assertEqual(obj.io_log_filename, "some-file.txt")
+
+
class DesiredJobListResumeTests(TestCase):

    """
    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
    and how it handles recreating SessionState.desired_job_list from its
    representation
    """

    def setUp(self):
        # All of the tests need a SessionState object and some jobs to work
        # with. Actual values don't matter much.
        self.job_a = make_job(name='a')
        self.job_b = make_job(name='b')
        self.session = SessionState([self.job_a, self.job_b])
        self.good_repr = {"desired_job_list": ['a', 'b']}
        self.resume_fn = \
            SessionResumeHelper._restore_SessionState_desired_job_list

    def _resume_with(self, desired_job_list):
        # Run the restore function against a representation whose
        # 'desired_job_list' entry was replaced with the given value
        obj_repr = dict(self.good_repr)
        obj_repr['desired_job_list'] = desired_job_list
        self.resume_fn(self.session, obj_repr)

    def test_restore_SessionState_desired_job_list_checks_for_repr_type(self):
        """
        verify that _restore_SessionState_desired_job_list() checks the
        type of the representation of ``desired_job_list``.
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            self._resume_with(1)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'desired_job_list' is of incorrect type int")

    def test_restore_SessionState_desired_job_list_checks_job_name_type(self):
        """
        verify that _restore_SessionState_desired_job_list() checks the
        type of each job name listed in ``desired_job_list``.
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            self._resume_with([1])
        self.assertEqual(str(boom.exception), "Each job name must be a string")

    def test_restore_SessionState_desired_job_list_checks_for_bogus_jobs(self):
        """
        verify that _restore_SessionState_desired_job_list() checks if
        each of the mentioned jobs are known and defined in the session
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            self._resume_with(['bogus'])
        self.assertEqual(
            str(boom.exception),
            "'desired_job_list' refers to unknown job 'bogus'")

    def test_restore_SessionState_desired_job_list_works(self):
        """
        verify that _restore_SessionState_desired_job_list() actually
        restores desired_job_list on the session
        """
        # A fresh session starts with nothing desired
        self.assertEqual(self.session.desired_job_list, [])
        self.resume_fn(self.session, self.good_repr)
        # Good representation has two jobs, 'a' and 'b', in that order
        self.assertEqual(
            self.session.desired_job_list, [self.job_a, self.job_b])
+
+
class SessionMetaDataResumeTests(TestCase):

    """
    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
    and how it handles recreating SessionMetaData from its representation
    """

    def setUp(self):
        # All of the tests need a SessionState object
        self.session = SessionState([])
        self.good_repr = {
            "metadata": {
                "title": "some title",
                "flags": ["flag1", "flag2"],
                "running_job_name": "job1"
            }
        }
        self.resume_fn = SessionResumeHelper._restore_SessionState_metadata

    # NOTE: the tests below use copy.deepcopy(), not copy.copy(). A shallow
    # copy would alias the nested 'metadata' dict, so assignments like
    # obj_repr['metadata']['title'] = ... would silently mutate
    # self.good_repr as well (previously masked only by setUp() rebuilding
    # good_repr before every test).

    def test_restore_SessionState_metadata_cheks_for_representation_type(self):
        """
        verify that _restore_SessionState_metadata() checks the type of
        the representation object
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            obj_repr = copy.deepcopy(self.good_repr)
            obj_repr['metadata'] = 1
            self.resume_fn(self.session, obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'metadata' is of incorrect type int")

    def test_restore_SessionState_metadata_checks_title_type(self):
        """
        verify that _restore_SessionState_metadata() checks the type of
        the ``title`` field.
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            obj_repr = copy.deepcopy(self.good_repr)
            obj_repr['metadata']['title'] = 1
            self.resume_fn(self.session, obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'title' is of incorrect type int")

    def test_restore_SessionState_metadata_allows_for_none_title(self):
        """
        verify that _restore_SessionState_metadata() allows for
        ``title`` to be None
        """
        obj_repr = copy.deepcopy(self.good_repr)
        obj_repr['metadata']['title'] = None
        self.resume_fn(self.session, obj_repr)
        self.assertEqual(self.session.metadata.title, None)

    def test_restore_SessionState_metadata_restores_title(self):
        """
        verify that _restore_SessionState_metadata() restores ``title``
        """
        obj_repr = copy.deepcopy(self.good_repr)
        obj_repr['metadata']['title'] = "a title"
        self.resume_fn(self.session, obj_repr)
        self.assertEqual(self.session.metadata.title, "a title")

    def test_restore_SessionState_metadata_checks_flags_type(self):
        """
        verify that _restore_SessionState_metadata() checks the type of
        the ``flags`` field.
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            obj_repr = copy.deepcopy(self.good_repr)
            obj_repr['metadata']['flags'] = 1
            self.resume_fn(self.session, obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'flags' is of incorrect type int")

    def test_restore_SessionState_metadata_cheks_if_flags_are_none(self):
        """
        verify that _restore_SessionState_metadata() checks if
        ``flags`` are None
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            obj_repr = copy.deepcopy(self.good_repr)
            obj_repr['metadata']['flags'] = None
            self.resume_fn(self.session, obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'flags' cannot be None")

    def test_restore_SessionState_metadata_checks_type_of_each_flag(self):
        """
        verify that _restore_SessionState_metadata() checks the type of each
        value of ``flags``
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            obj_repr = copy.deepcopy(self.good_repr)
            obj_repr['metadata']['flags'] = [1]
            self.resume_fn(self.session, obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Each flag must be a string")

    def test_restore_SessionState_metadata_restores_flags(self):
        """
        verify that _restore_SessionState_metadata() restores ``flags``
        """
        obj_repr = copy.deepcopy(self.good_repr)
        obj_repr['metadata']['flags'] = ["flag1", "flag2"]
        self.resume_fn(self.session, obj_repr)
        # Flags are kept as a set, regardless of the list form on disk
        self.assertEqual(self.session.metadata.flags, set(['flag1', 'flag2']))

    def test_restore_SessionState_metadata_checks_running_job_name_type(self):
        """
        verify that _restore_SessionState_metadata() checks the type of
        ``running_job_name``.
        """
        with self.assertRaises(CorruptedSessionError) as boom:
            obj_repr = copy.deepcopy(self.good_repr)
            obj_repr['metadata']['running_job_name'] = 1
            self.resume_fn(self.session, obj_repr)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'running_job_name' is of incorrect type int")

    def test_restore_SessionState_metadata_allows__none_running_job_name(self):
        """
        verify that _restore_SessionState_metadata() allows for
        ``running_job_name`` to be None
        """
        obj_repr = copy.deepcopy(self.good_repr)
        obj_repr['metadata']['running_job_name'] = None
        self.resume_fn(self.session, obj_repr)
        self.assertEqual(self.session.metadata.running_job_name, None)

    def test_restore_SessionState_metadata_restores_running_job_name(self):
        """
        verify that _restore_SessionState_metadata() restores
        the value of ``running_job_name``
        """
        obj_repr = copy.deepcopy(self.good_repr)
        obj_repr['metadata']['running_job_name'] = "a job"
        self.resume_fn(self.session, obj_repr)
        self.assertEqual(self.session.metadata.running_job_name, "a job")
+
+
class ProcessJobTests(TestCase):

    """
    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
    and how it handles processing jobs using _process_job() method
    """

    def setUp(self):
        self.job_name = 'job'
        self.job = make_job(name=self.job_name)
        # Map of job name -> job checksum, as stored in the suspended state
        self.jobs_repr = {self.job_name: self.job.get_checksum()}
        # A single failing result for the job
        self.results_repr = {
            self.job_name: [{
                'outcome': 'fail',
                'comments': None,
                'execution_duration': None,
                'return_code': None,
                'io_log': [],
            }]
        }
        self.helper = SessionResumeHelper([self.job])
        # Normally the helper constructs the session internally; building
        # one here lets each test drive _process_job() in isolation.
        self.session = SessionState([self.job])

    def test_process_job_checks_type_of_job_name(self):
        """
        verify that _process_job() checks the type of ``job_name``
        """
        bogus_name = 1  # anything that is not a string
        with self.assertRaises(CorruptedSessionError) as boom:
            self.helper._process_job(
                self.session, self.jobs_repr, self.results_repr, bogus_name)
        self.assertEqual(
            str(boom.exception), "Value of object is of incorrect type int")

    def test_process_job_checks_for_missing_checksum(self):
        """
        verify that _process_job() checks if ``checksum`` is missing
        """
        empty_jobs_repr = {}  # no checksums for any job
        with self.assertRaises(CorruptedSessionError) as boom:
            self.helper._process_job(
                self.session, empty_jobs_repr, self.results_repr,
                self.job_name)
        self.assertEqual(str(boom.exception), "Missing value for key 'job'")

    def test_process_job_checks_if_job_is_known(self):
        """
        verify that _process_job() checks if job is known or raises KeyError
        """
        empty_session = SessionState([])  # knows about no jobs at all
        with self.assertRaises(KeyError) as boom:
            self.helper._process_job(
                empty_session, self.jobs_repr, self.results_repr,
                self.job_name)
        self.assertEqual(boom.exception.args[0], 'job')

    def test_process_job_checks_if_job_checksum_matches(self):
        """
        verify that _process_job() checks if job checksum matches the
        checksum of a job with the same name that was passed to the helper.
        """
        corrupted_jobs_repr = {self.job_name: 'bad-checksum'}
        with self.assertRaises(IncompatibleJobError) as boom:
            self.helper._process_job(
                self.session, corrupted_jobs_repr, self.results_repr,
                self.job_name)
        self.assertEqual(
            str(boom.exception), "Definition of job 'job' has changed")

    def test_process_job_handles_ignores_empty_results(self):
        """
        verify that _process_job() does not crash if we have no results
        for a particular job
        """
        self.assertEqual(
            self.session.job_state_map[self.job_name].result.outcome, None)
        empty_results_repr = {self.job_name: []}
        self.helper._process_job(
            self.session, self.jobs_repr, empty_results_repr, self.job_name)
        # The outcome stays undecided as there was nothing to apply
        self.assertEqual(
            self.session.job_state_map[self.job_name].result.outcome, None)

    def test_process_job_handles_only_result_back_to_the_session(self):
        """
        verify that _process_job() passes the only result to the session
        """
        self.assertEqual(
            self.session.job_state_map[self.job_name].result.outcome, None)
        self.helper._process_job(
            self.session, self.jobs_repr, self.results_repr, self.job_name)
        # The result in self.results_repr is a failure so we should see it here
        self.assertEqual(
            self.session.job_state_map[self.job_name].result.outcome, "fail")

    def test_process_job_handles_last_result_back_to_the_session(self):
        """
        verify that _process_job() passes last of the results to the session
        """
        self.assertEqual(
            self.session.job_state_map[self.job_name].result.outcome, None)
        base_result = {
            'comments': None,
            'execution_duration': None,
            'return_code': None,
            'io_log': [],
        }
        results_repr = {
            self.job_name: [
                dict(base_result, outcome='fail'),
                dict(base_result, outcome='pass'),
            ]
        }
        self.helper._process_job(
            self.session, self.jobs_repr, results_repr, self.job_name)
        # results_repr has two entries: [fail, pass] so we should see
        # the passing entry only
        self.assertEqual(
            self.session.job_state_map[self.job_name].result.outcome, "pass")

    def test_process_job_checks_results_repr_is_a_list(self):
        """
        verify that _process_job() checks if results_repr is a dictionary
        of lists.
        """
        broken_results_repr = {self.job_name: 1}
        with self.assertRaises(CorruptedSessionError) as boom:
            self.helper._process_job(
                self.session, self.jobs_repr, broken_results_repr,
                self.job_name)
        self.assertEqual(
            str(boom.exception),
            "Value of key 'job' is of incorrect type int")

    def test_process_job_checks_results_repr_values_are_dicts(self):
        """
        verify that _process_job() checks if results_repr is a dictionary
        of lists, each of which holds a dictionary.
        """
        broken_results_repr = {self.job_name: [1]}
        with self.assertRaises(CorruptedSessionError) as boom:
            self.helper._process_job(
                self.session, self.jobs_repr, broken_results_repr,
                self.job_name)
        self.assertEqual(
            str(boom.exception),
            "Value of object is of incorrect type int")
+
+
+class JobPluginSpecificTests(TestCase):
+
+    """
+    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
+    and how it handles processing jobs using the _process_job() method.
+    This class focuses on plugin-specific tests, such as those for local
+    and resource jobs.
+    """
+
+    def test_process_job_restores_resources(self):
+        """
+        verify that _process_job() recreates resources
+        """
+        # Set the stage for testing. Setup a session with a known
+        # resource job, representation of the job (checksum)
+        # and representation of a single result, which has a single line
+        # that defines a 'key': 'value' resource record.
+        job_name = 'resource'
+        job = make_job(name=job_name, plugin='resource')
+        jobs_repr = {
+            job_name: job.get_checksum()
+        }
+        results_repr = {
+            job_name: [{
+                'outcome': None,
+                'comments': None,
+                'execution_duration': None,
+                'return_code': None,
+                'io_log': [
+                    # A bit convoluted but this is how we encode each chunk
+                    # of IOLogRecord: [delay, stream-name, base64 payload]
+                    [0.0, 'stdout', base64.standard_b64encode(
+                        b'key: value'
+                    ).decode('ASCII')]
+                ],
+            }]
+        }
+        helper = SessionResumeHelper([job])
+        session = SessionState([job])
+        # Ensure that the resource was not there initially
+        self.assertNotIn(job_name, session.resource_map)
+        # Process the representation data defined above
+        helper._process_job(session, jobs_repr, results_repr, job_name)
+        # Ensure that we now have the resource in the resource map
+        self.assertIn(job_name, session.resource_map)
+        # And that it looks right
+        self.assertEqual(
+            session.resource_map[job_name],
+            [Resource({'key': 'value'})])
+
+    def test_process_job_restores_jobs(self):
+        """
+        verify that _process_job() recreates generated jobs
+        """
+        # Set the stage for testing. Setup a session with a known
+        # local job, representation of the job (checksum)
+        # and representation of a single result, which has a single line
+        # that defines a 'name': 'generated' job.
+        job_name = 'local'
+        job = make_job(name=job_name, plugin='local')
+        jobs_repr = {
+            job_name: job.get_checksum()
+        }
+        results_repr = {
+            job_name: [{
+                'outcome': None,
+                'comments': None,
+                'execution_duration': None,
+                'return_code': None,
+                'io_log': [
+                    # Encoded IOLogRecord chunk (see the resource test above)
+                    [0.0, 'stdout', base64.standard_b64encode(
+                        b'name: generated'
+                    ).decode('ASCII')]
+                ],
+            }]
+        }
+        helper = SessionResumeHelper([job])
+        session = SessionState([job])
+        # Ensure that the 'generated' job was not there initially
+        self.assertNotIn('generated', session.job_state_map)
+        self.assertEqual(session.job_list, [job])
+        # Process the representation data defined above
+        helper._process_job(session, jobs_repr, results_repr, job_name)
+        # Ensure that we now have the 'generated' job in the job_state_map
+        self.assertIn('generated', session.job_state_map)
+        # And that it looks right
+        self.assertEqual(
+            session.job_state_map['generated'].job.name, 'generated')
+        self.assertIn(
+            session.job_state_map['generated'].job, session.job_list)
+
+
+class SessionJobsAndResultsResumeTests(TestCase):
+
+    """
+    Tests for :class:`~plainbox.impl.session.resume.SessionResumeHelper`
+    and how it resumes the session using the
+    _restore_SessionState_jobs_and_results() method.
+    """
+
+    def test_empty_session(self):
+        """
+        verify that _restore_SessionState_jobs_and_results() works when
+        faced with a representation of an empty session. This is mostly
+        to do sanity checking on the 'easy' parts of the code before
+        testing specific cases in the rest of the code.
+        """
+        session_repr = {
+            'jobs': {},
+            'results': {}
+        }
+        helper = SessionResumeHelper([])
+        session = SessionState([])
+        helper._restore_SessionState_jobs_and_results(session, session_repr)
+        # Restoring an empty representation must leave the session empty
+        self.assertEqual(session.job_list, [])
+        self.assertEqual(session.resource_map, {})
+        self.assertEqual(session.job_state_map, {})
+
+    def test_simple_session(self):
+        """
+        verify that _restore_SessionState_jobs_and_results() works when
+        faced with a representation of a simple session (no generated jobs
+        or anything "exotic").
+        """
+        job = make_job(name='job')
+        session_repr = {
+            'jobs': {
+                job.name: job.get_checksum(),
+            },
+            'results': {
+                job.name: [{
+                    'outcome': 'pass',
+                    'comments': None,
+                    'execution_duration': None,
+                    'return_code': None,
+                    'io_log': [],
+                }]
+            }
+        }
+        helper = SessionResumeHelper([job])
+        session = SessionState([job])
+        helper._restore_SessionState_jobs_and_results(session, session_repr)
+        # Session still has one job in it
+        self.assertEqual(session.job_list, [job])
+        # Resources don't have anything (no resource jobs)
+        self.assertEqual(session.resource_map, {})
+        # The result was restored correctly. This is just a smoke test
+        # as specific tests for restoring results are written elsewhere
+        self.assertEqual(
+            session.job_state_map[job.name].result.outcome, 'pass')
+
+    def test_session_with_generated_jobs(self):
+        """
+        verify that _restore_SessionState_jobs_and_results() works when
+        faced with a representation of a non-trivial session where one
+        job generates another one.
+        """
+        parent = make_job(name='parent', plugin='local')
+        # The child job is only here so that we can get the checksum.
+        # We don't actually introduce it into the resume machinery
+        # caveat: make_job() has a default value for
+        # plugin='dummy' which we don't want here
+        child = make_job(name='child', plugin=None)
+        session_repr = {
+            'jobs': {
+                parent.name: parent.get_checksum(),
+                child.name: child.get_checksum(),
+            },
+            'results': {
+                parent.name: [{
+                    'outcome': 'pass',
+                    'comments': None,
+                    'execution_duration': None,
+                    'return_code': None,
+                    'io_log': [
+                        # This record will generate a job identical
+                        # to the 'child' job defined above.
+                        [0.0, 'stdout', base64.standard_b64encode(
+                            b'name: child\n'

Follow ups