bigdata-dev team mailing list archive

[Merge] lp:~freyes/charms/xenial/rsyslog/lp1694270 into lp:~bigdata-dev/charms/xenial/rsyslog/trunk

 

Felipe Reyes has proposed merging lp:~freyes/charms/xenial/rsyslog/lp1694270 into lp:~bigdata-dev/charms/xenial/rsyslog/trunk.

Requested reviews:
  Juju Big Data Development (bigdata-dev)
  Jorge Niedbalski (niedbalski)
Related bugs:
  Bug #1694270 in rsyslog (Juju Charms Collection): "rsyslog logrotate template invokes 'reload rsyslog' which fails in Xenial"
  https://bugs.launchpad.net/charms/+source/rsyslog/+bug/1694270

For more details, see:
https://code.launchpad.net/~freyes/charms/xenial/rsyslog/lp1694270/+merge/325877

Use 'rotate' action from /etc/init.d/rsyslog

The 'reload' command does not exist in Xenial; the rsyslog package instead
provides a 'rotate' action in its sysvinit script that closes all open file
descriptors.
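
For reference, a sketch of the two postrotate variants that the updated
template (see the templates/rsyslog.conf hunk below) is expected to render,
depending on the unit's series:

    # trusty and earlier (upstart)
    postrotate
            reload rsyslog >/dev/null 2>&1 || true
    endscript

    # xenial and later
    postrotate
            invoke-rc.d rsyslog rotate > /dev/null
    endscript

Using the init script's 'rotate' action avoids relying on upstart's 'reload'
command, which is not available on systemd-based releases such as Xenial.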

-- 
Your team Juju Big Data Development is requested to review the proposed merge of lp:~freyes/charms/xenial/rsyslog/lp1694270 into lp:~bigdata-dev/charms/xenial/rsyslog/trunk.
=== modified file 'hooks/charmhelpers/__init__.py'
--- hooks/charmhelpers/__init__.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/__init__.py	2017-06-16 22:43:26 +0000
@@ -14,6 +14,11 @@
 
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
 import subprocess
 import sys
 
@@ -34,3 +39,59 @@
     else:
         subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # flake8: noqa
+
+
+# Holds a list of mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below.  This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+    The date, which is a string in semi-ISO8601 format, indicates the
+    year-month in which the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning:  String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format, to indicate when the
+                 function will definitely (probably) be removed.
+    :param log: The log function to call to log.  If not, logs to stdout
+    """
+    def wrap(f):
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            try:
+                module = inspect.getmodule(f)
+                file = inspect.getsourcefile(f)
+                lines = inspect.getsourcelines(f)
+                f_name = "{}-{}-{}..{}-{}".format(
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
+            except (IOError, TypeError):
+                # assume it was local, so just use the name of the function
+                f_name = f.__name__
+            if f_name not in __deprecated_functions:
+                __deprecated_functions[f_name] = True
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
+                    f.__name__)
+                if date:
+                    s = "{} on/around {}".format(s, date)
+                if warning:
+                    s = "{} : {}".format(s, warning)
+                if log:
+                    log(s)
+                else:
+                    print(s)
+            return f(*args, **kwargs)
+        return wrapped_f
+    return wrap

=== modified file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
--- hooks/charmhelpers/contrib/charmsupport/nrpe.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py	2017-06-16 22:43:26 +0000
@@ -193,6 +193,13 @@
         nrpe_check_file = self._get_check_filename()
         with open(nrpe_check_file, 'w') as nrpe_check_config:
             nrpe_check_config.write("# check {}\n".format(self.shortname))
+            if nagios_servicegroups:
+                nrpe_check_config.write(
+                    "# The following header was added automatically by juju\n")
+                nrpe_check_config.write(
+                    "# Modifying it will affect nagios monitoring and alerting\n")
+                nrpe_check_config.write(
+                    "# servicegroups: {}\n".format(nagios_servicegroups))
             nrpe_check_config.write("command[{}]={}\n".format(
                 self.command, self.check_cmd))
 
@@ -227,6 +234,7 @@
     nagios_logdir = '/var/log/nagios'
     nagios_exportdir = '/var/lib/nagios/export'
     nrpe_confdir = '/etc/nagios/nrpe.d'
+    homedir = '/var/lib/nagios'  # home dir provided by nagios-nrpe-server
 
     def __init__(self, hostname=None, primary=True):
         super(NRPE, self).__init__()
@@ -338,13 +346,14 @@
     return unit
 
 
-def add_init_service_checks(nrpe, services, unit_name):
+def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
     """
     Add checks for each service in list
 
     :param NRPE nrpe: NRPE object to add check to
     :param list services: List of services to check
     :param str unit_name: Unit name to use in check description
+    :param bool immediate_check: For sysv init, run the service check immediately
     """
     for svc in services:
         # Don't add a check for these services from neutron-gateway
@@ -368,21 +377,31 @@
             )
         elif os.path.exists(sysv_init):
             cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
-            cron_file = ('*/5 * * * * root '
-                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
-                         '-s /etc/init.d/%s status > '
-                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
-                                                                     svc)
-                         )
+            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
+            croncmd = (
+                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+                '-e -s /etc/init.d/%s status' % svc
+            )
+            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
             f = open(cronpath, 'w')
             f.write(cron_file)
             f.close()
             nrpe.add_check(
                 shortname=svc,
-                description='process check {%s}' % unit_name,
-                check_cmd='check_status_file.py -f '
-                          '/var/lib/nagios/service-check-%s.txt' % svc,
+                description='service check {%s}' % unit_name,
+                check_cmd='check_status_file.py -f %s' % checkpath,
             )
+            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
+            # (LP: #1670223).
+            if immediate_check and os.path.isdir(nrpe.homedir):
+                f = open(checkpath, 'w')
+                subprocess.call(
+                    croncmd.split(),
+                    stdout=f,
+                    stderr=subprocess.STDOUT
+                )
+                f.close()
+                os.chmod(checkpath, 0o644)
 
 
 def copy_nrpe_checks():

=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/core/hookenv.py	2017-06-16 22:43:26 +0000
@@ -332,6 +332,8 @@
     config_cmd_line = ['config-get']
     if scope is not None:
         config_cmd_line.append(scope)
+    else:
+        config_cmd_line.append('--all')
     config_cmd_line.append('--format=json')
     try:
         config_data = json.loads(
@@ -614,6 +616,20 @@
     subprocess.check_call(_args)
 
 
+def open_ports(start, end, protocol="TCP"):
+    """Opens a range of service network ports"""
+    _args = ['open-port']
+    _args.append('{}-{}/{}'.format(start, end, protocol))
+    subprocess.check_call(_args)
+
+
+def close_ports(start, end, protocol="TCP"):
+    """Close a range of service network ports"""
+    _args = ['close-port']
+    _args.append('{}-{}/{}'.format(start, end, protocol))
+    subprocess.check_call(_args)
+
+
 @cached
 def unit_get(attribute):
     """Get the unit ID for the remote unit"""
@@ -1019,3 +1035,34 @@
     '''
     cmd = ['network-get', '--primary-address', binding]
     return subprocess.check_output(cmd).decode('UTF-8').strip()
+
+
+def add_metric(*args, **kwargs):
+    """Add metric values. Values may be expressed with keyword arguments. For
+    metric names containing dashes, these may be expressed as one or more
+    'key=value' positional arguments. May only be called from the collect-metrics
+    hook."""
+    _args = ['add-metric']
+    _kvpairs = []
+    _kvpairs.extend(args)
+    _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
+    _args.extend(sorted(_kvpairs))
+    try:
+        subprocess.check_call(_args)
+        return
+    except EnvironmentError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
+    log(log_message, level='INFO')
+
+
+def meter_status():
+    """Get the meter status, if running in the meter-status-changed hook."""
+    return os.environ.get('JUJU_METER_STATUS')
+
+
+def meter_info():
+    """Get the meter status information, if running in the meter-status-changed
+    hook."""
+    return os.environ.get('JUJU_METER_INFO')

=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/core/host.py	2017-06-16 22:43:26 +0000
@@ -45,6 +45,7 @@
         add_new_group,
         lsb_release,
         cmp_pkgrevno,
+        CompareHostReleases,
     )  # flake8: noqa -- ignore F401 for this import
 elif __platform__ == "centos":
     from charmhelpers.core.host_factory.centos import (
@@ -52,44 +53,146 @@
         add_new_group,
         lsb_release,
         cmp_pkgrevno,
+        CompareHostReleases,
     )  # flake8: noqa -- ignore F401 for this import
 
-
-def service_start(service_name):
-    """Start a system service"""
-    return service('start', service_name)
-
-
-def service_stop(service_name):
-    """Stop a system service"""
-    return service('stop', service_name)
-
-
-def service_restart(service_name):
-    """Restart a system service"""
+UPDATEDB_PATH = '/etc/updatedb.conf'
+
+def service_start(service_name, **kwargs):
+    """Start a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The
+    following example starts the ceph-osd service for instance id=4:
+
+    service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The
+    following example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The
+    following example restarts the ceph-osd service for instance id=4:
+
+    service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the  init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
     return service('restart', service_name)
 
 
-def service_reload(service_name, restart_on_failure=False):
+def service_reload(service_name, restart_on_failure=False, **kwargs):
     """Reload a system service, optionally falling back to restart if
-    reload fails"""
-    service_result = service('reload', service_name)
+    reload fails.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be reloaded. The
+    following example reloads the ceph-osd service for instance id=4:
+
+    service_reload('ceph-osd', id=4)
+
+    :param service_name: the name of the service to reload
+    :param restart_on_failure: boolean indicating whether to fallback to a
+                               restart if the reload fails.
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the  init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    service_result = service('reload', service_name, **kwargs)
     if not service_result and restart_on_failure:
-        service_result = service('restart', service_name)
+        service_result = service('restart', service_name, **kwargs)
     return service_result
 
 
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
+                  **kwargs):
     """Pause a system service.
 
-    Stop it, and prevent it from starting again at boot."""
+    Stop it, and prevent it from starting again at boot.
+
+    :param service_name: the name of the service to pause
+    :param init_dir: path to the upstart init directory
+    :param initd_dir: path to the sysv init directory
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems which do not support
+                     key=value arguments via the commandline.
+    """
     stopped = True
-    if service_running(service_name):
-        stopped = service_stop(service_name)
+    if service_running(service_name, **kwargs):
+        stopped = service_stop(service_name, **kwargs)
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
         service('disable', service_name)
+        service('mask', service_name)
     elif os.path.exists(upstart_file):
         override_path = os.path.join(
             init_dir, '{}.override'.format(service_name))
@@ -106,13 +209,23 @@
 
 
 def service_resume(service_name, init_dir="/etc/init",
-                   initd_dir="/etc/init.d"):
+                   initd_dir="/etc/init.d", **kwargs):
     """Resume a system service.
 
-    Reenable starting again at boot. Start the service"""
+    Reenable starting again at boot. Start the service.
+
+    :param service_name: the name of the service to resume
+    :param init_dir: the path to the init dir
+    :param initd_dir: the path to the initd dir
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
+        service('unmask', service_name)
         service('enable', service_name)
     elif os.path.exists(upstart_file):
         override_path = os.path.join(
@@ -126,19 +239,28 @@
             "Unable to detect {0} as SystemD, Upstart {1} or"
             " SysV {2}".format(
                 service_name, upstart_file, sysv_file))
+    started = service_running(service_name, **kwargs)
 
-    started = service_running(service_name)
     if not started:
-        started = service_start(service_name)
+        started = service_start(service_name, **kwargs)
     return started
 
 
-def service(action, service_name):
-    """Control a system service"""
+def service(action, service_name, **kwargs):
+    """Control a system service.
+
+    :param action: the action to take on the service
+    :param service_name: the name of the service to perform the action on
+    :param **kwargs: additional params to be passed to the service command in
+                    the form of key=value.
+    """
     if init_is_systemd():
         cmd = ['systemctl', action, service_name]
     else:
         cmd = ['service', service_name, action]
+        for key, value in six.iteritems(kwargs):
+            parameter = '%s=%s' % (key, value)
+            cmd.append(parameter)
     return subprocess.call(cmd) == 0
 
 
@@ -146,15 +268,26 @@
 _INIT_D_CONF = "/etc/init.d/{}"
 
 
-def service_running(service_name):
-    """Determine whether a system service is running"""
+def service_running(service_name, **kwargs):
+    """Determine whether a system service is running.
+
+    :param service_name: the name of the service
+    :param **kwargs: additional args to pass to the service command. This is
+                     used to pass additional key=value arguments to the
+                     service command line for managing specific instance
+                     units (e.g. service ceph-osd status id=2). The kwargs
+                     are ignored in systemd services.
+    """
     if init_is_systemd():
         return service('is-active', service_name)
     else:
         if os.path.exists(_UPSTART_CONF.format(service_name)):
             try:
-                output = subprocess.check_output(
-                    ['status', service_name],
+                cmd = ['status', service_name]
+                for key, value in six.iteritems(kwargs):
+                    parameter = '%s=%s' % (key, value)
+                    cmd.append(parameter)
+                output = subprocess.check_output(cmd,
                     stderr=subprocess.STDOUT).decode('UTF-8')
             except subprocess.CalledProcessError:
                 return False
@@ -177,6 +310,8 @@
 
 def init_is_systemd():
     """Return True if the host system uses systemd, False otherwise."""
+    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
+        return False
     return os.path.isdir(SYSTEMD_SYSTEM)
 
 
@@ -306,15 +441,17 @@
     subprocess.check_call(cmd)
 
 
-def rsync(from_path, to_path, flags='-r', options=None):
+def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
     """Replicate the contents of a path"""
     options = options or ['--delete', '--executability']
     cmd = ['/usr/bin/rsync', flags]
+    if timeout:
+        cmd = ['timeout', str(timeout)] + cmd
     cmd.extend(options)
     cmd.append(from_path)
     cmd.append(to_path)
     log(" ".join(cmd))
-    return subprocess.check_output(cmd).decode('UTF-8').strip()
+    return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
 
 
 def symlink(source, destination):
@@ -684,7 +821,7 @@
     :param str path: The string path to start changing ownership.
     :param str owner: The owner string to use when looking up the uid.
     :param str group: The group string to use when looking up the gid.
-    :param bool follow_links: Also Chown links if True
+    :param bool follow_links: Also follow and chown links if True
     :param bool chowntopdir: Also chown path itself if True
     """
     uid = pwd.getpwnam(owner).pw_uid
@@ -698,7 +835,7 @@
         broken_symlink = os.path.lexists(path) and not os.path.exists(path)
         if not broken_symlink:
             chown(path, uid, gid)
-    for root, dirs, files in os.walk(path):
+    for root, dirs, files in os.walk(path, followlinks=follow_links):
         for name in dirs + files:
             full = os.path.join(root, name)
             broken_symlink = os.path.lexists(full) and not os.path.exists(full)
@@ -718,6 +855,20 @@
     chownr(path, owner, group, follow_links=False)
 
 
+def owner(path):
+    """Returns a tuple containing the username & groupname owning the path.
+
+    :param str path: the string path to retrieve the ownership
+    :return tuple(str, str): A (username, groupname) tuple containing the
+                             name of the user and group owning the path.
+    :raises OSError: if the specified path does not exist
+    """
+    stat = os.stat(path)
+    username = pwd.getpwuid(stat.st_uid)[0]
+    groupname = grp.getgrgid(stat.st_gid)[0]
+    return username, groupname
+
+
 def get_total_ram():
     """The total amount of system RAM in bytes.
 
@@ -732,3 +883,42 @@
                     assert unit == 'kB', 'Unknown unit'
                     return int(value) * 1024  # Classic, not KiB.
         raise NotImplementedError()
+
+
+UPSTART_CONTAINER_TYPE = '/run/container_type'
+
+
+def is_container():
+    """Determine whether unit is running in a container
+
+    @return: boolean indicating if unit is in a container
+    """
+    if init_is_systemd():
+        # Detect using systemd-detect-virt
+        return subprocess.call(['systemd-detect-virt',
+                                '--container']) == 0
+    else:
+        # Detect using upstart container file marker
+        return os.path.exists(UPSTART_CONTAINER_TYPE)
+
+
+def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+    with open(updatedb_path, 'r+') as f_id:
+        updatedb_text = f_id.read()
+        output = updatedb(updatedb_text, path)
+        f_id.seek(0)
+        f_id.write(output)
+        f_id.truncate()
+
+
+def updatedb(updatedb_text, new_path):
+    lines = [line for line in updatedb_text.split("\n")]
+    for i, line in enumerate(lines):
+        if line.startswith("PRUNEPATHS="):
+            paths_line = line.split("=")[1].replace('"', '')
+            paths = paths_line.split(" ")
+            if new_path not in paths:
+                paths.append(new_path)
+                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
+    output = "\n".join(lines)
+    return output

=== modified file 'hooks/charmhelpers/core/host_factory/centos.py'
--- hooks/charmhelpers/core/host_factory/centos.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/core/host_factory/centos.py	2017-06-16 22:43:26 +0000
@@ -2,6 +2,22 @@
 import yum
 import os
 
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Host releases.
+
+    Use in the form of
+
+    if CompareHostReleases(release) > 'trusty':
+        # do something with mitaka
+    """
+
+    def __init__(self, item):
+        raise NotImplementedError(
+            "CompareHostReleases() is not implemented for CentOS")
+
 
 def service_available(service_name):
     # """Determine whether a system service is available."""

=== modified file 'hooks/charmhelpers/core/host_factory/ubuntu.py'
--- hooks/charmhelpers/core/host_factory/ubuntu.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/core/host_factory/ubuntu.py	2017-06-16 22:43:26 +0000
@@ -1,5 +1,38 @@
 import subprocess
 
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+UBUNTU_RELEASES = (
+    'lucid',
+    'maverick',
+    'natty',
+    'oneiric',
+    'precise',
+    'quantal',
+    'raring',
+    'saucy',
+    'trusty',
+    'utopic',
+    'vivid',
+    'wily',
+    'xenial',
+    'yakkety',
+    'zesty',
+    'artful',
+)
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Ubuntu releases.
+
+    Use in the form of
+
+    if CompareHostReleases(release) > 'trusty':
+        # do something with mitaka
+    """
+    _list = UBUNTU_RELEASES
+
 
 def service_available(service_name):
     """Determine whether a system service is available"""

=== modified file 'hooks/charmhelpers/core/kernel_factory/ubuntu.py'
--- hooks/charmhelpers/core/kernel_factory/ubuntu.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/core/kernel_factory/ubuntu.py	2017-06-16 22:43:26 +0000
@@ -5,7 +5,7 @@
     """Load a kernel module and configure for auto-load on reboot."""
     with open('/etc/modules', 'r+') as modules:
         if module not in modules.read():
-            modules.write(module)
+            modules.write(module + "\n")
 
 
 def update_initramfs(version='all'):

=== modified file 'hooks/charmhelpers/core/strutils.py'
--- hooks/charmhelpers/core/strutils.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/core/strutils.py	2017-06-16 22:43:26 +0000
@@ -68,3 +68,56 @@
         msg = "Unable to interpret string value '%s' as bytes" % (value)
         raise ValueError(msg)
     return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+
+
+class BasicStringComparator(object):
+    """Provides a class that will compare strings from an iterator type object.
+    Used to provide > and < comparisons on strings that may not necessarily be
+    alphanumerically ordered.  e.g. OpenStack or Ubuntu releases AFTER the
+    z-wrap.
+    """
+
+    _list = None
+
+    def __init__(self, item):
+        if self._list is None:
+            raise Exception("Must define the _list in the class definition!")
+        try:
+            self.index = self._list.index(item)
+        except Exception:
+            raise KeyError("Item '{}' is not in list '{}'"
+                           .format(item, self._list))
+
+    def __eq__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index == self._list.index(other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __lt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index < self._list.index(other)
+
+    def __ge__(self, other):
+        return not self.__lt__(other)
+
+    def __gt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index > self._list.index(other)
+
+    def __le__(self, other):
+        return not self.__gt__(other)
+
+    def __str__(self):
+        """Always give back the item at the index so it can be used in
+        comparisons like:
+
+        s_mitaka = CompareOpenStack('mitaka')
+        s_newton = CompareOpenStack('newton')
+
+        assert s_newton > s_mitaka
+
+        @returns: <string>
+        """
+        return self._list[self.index]

=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/fetch/__init__.py	2017-06-16 22:43:26 +0000
@@ -48,6 +48,13 @@
     pass
 
 
+class GPGKeyError(Exception):
+    """Exception occurs when a GPG key cannot be fetched or used.  The message
+    indicates what the problem is.
+    """
+    pass
+
+
 class BaseFetchHandler(object):
 
     """Base class for FetchHandler implementations in fetch plugins"""
@@ -77,21 +84,22 @@
 fetch = importlib.import_module(module)
 
 filter_installed_packages = fetch.filter_installed_packages
-install = fetch.install
-upgrade = fetch.upgrade
-update = fetch.update
-purge = fetch.purge
+install = fetch.apt_install
+upgrade = fetch.apt_upgrade
+update = _fetch_update = fetch.apt_update
+purge = fetch.apt_purge
 add_source = fetch.add_source
 
 if __platform__ == "ubuntu":
     apt_cache = fetch.apt_cache
-    apt_install = fetch.install
-    apt_update = fetch.update
-    apt_upgrade = fetch.upgrade
-    apt_purge = fetch.purge
+    apt_install = fetch.apt_install
+    apt_update = fetch.apt_update
+    apt_upgrade = fetch.apt_upgrade
+    apt_purge = fetch.apt_purge
     apt_mark = fetch.apt_mark
     apt_hold = fetch.apt_hold
     apt_unhold = fetch.apt_unhold
+    import_key = fetch.import_key
     get_upstream_version = fetch.get_upstream_version
 elif __platform__ == "centos":
     yum_search = fetch.yum_search
@@ -135,7 +143,7 @@
         for source, key in zip(sources, keys):
             add_source(source, key)
     if update:
-        fetch.update(fatal=True)
+        _fetch_update(fatal=True)
 
 
 def install_remote(source, *args, **kwargs):

=== modified file 'hooks/charmhelpers/fetch/centos.py'
--- hooks/charmhelpers/fetch/centos.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/fetch/centos.py	2017-06-16 22:43:26 +0000
@@ -132,7 +132,7 @@
                 key_file.write(key)
                 key_file.flush()
                 key_file.seek(0)
-            subprocess.check_call(['rpm', '--import', key_file])
+                subprocess.check_call(['rpm', '--import', key_file.name])
         else:
             subprocess.check_call(['rpm', '--import', key])
 

=== added file 'hooks/charmhelpers/fetch/snap.py'
--- hooks/charmhelpers/fetch/snap.py	1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/fetch/snap.py	2017-06-16 22:43:26 +0000
@@ -0,0 +1,122 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+from os import environ
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg <joseph.borg@xxxxxxxxxxxxx>'
+
+SNAP_NO_LOCK = 1  # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved).
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+
+
+class CouldNotAcquireLockException(Exception):
+    pass
+
+
+def _snap_exec(commands):
+    """
+    Execute snap commands.
+
+    :param commands: List commands
+    :return: Integer exit code
+    """
+    assert type(commands) == list
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands, env=environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to install command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Installing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with option(s) "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['install'] + flags + packages)
+
+
+def snap_remove(packages, *flags):
+    """
+    Remove a snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to remove command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Removing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with options "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['remove'] + flags + packages)
+
+
+def snap_refresh(packages, *flags):
+    """
+    Refresh / Update snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to refresh command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with options "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['refresh'] + flags + packages)

=== modified file 'hooks/charmhelpers/fetch/ubuntu.py'
--- hooks/charmhelpers/fetch/ubuntu.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/fetch/ubuntu.py	2017-06-16 22:43:26 +0000
@@ -12,29 +12,47 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from collections import OrderedDict
 import os
+import platform
+import re
 import six
 import time
 import subprocess
-
 from tempfile import NamedTemporaryFile
+
 from charmhelpers.core.host import (
     lsb_release
 )
-from charmhelpers.core.hookenv import log
-from charmhelpers.fetch import SourceConfigError
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.fetch import SourceConfigError, GPGKeyError
 
+PROPOSED_POCKET = (
+    "# Proposed\n"
+    "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe "
+    "multiverse restricted\n")
+PROPOSED_PORTS_POCKET = (
+    "# Proposed\n"
+    "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe "
+    "multiverse restricted\n")
+# Only supports x86_64, ppc64le and aarch64 at the moment.
+ARCH_TO_PROPOSED_POCKET = {
+    'x86_64': PROPOSED_POCKET,
+    'ppc64le': PROPOSED_PORTS_POCKET,
+    'aarch64': PROPOSED_PORTS_POCKET,
+}
+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
 """
-
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-
 CLOUD_ARCHIVE_POCKETS = {
     # Folsom
     'folsom': 'precise-updates/folsom',
+    'folsom/updates': 'precise-updates/folsom',
     'precise-folsom': 'precise-updates/folsom',
     'precise-folsom/updates': 'precise-updates/folsom',
     'precise-updates/folsom': 'precise-updates/folsom',
@@ -43,6 +61,7 @@
     'precise-proposed/folsom': 'precise-proposed/folsom',
     # Grizzly
     'grizzly': 'precise-updates/grizzly',
+    'grizzly/updates': 'precise-updates/grizzly',
     'precise-grizzly': 'precise-updates/grizzly',
     'precise-grizzly/updates': 'precise-updates/grizzly',
     'precise-updates/grizzly': 'precise-updates/grizzly',
@@ -51,6 +70,7 @@
     'precise-proposed/grizzly': 'precise-proposed/grizzly',
     # Havana
     'havana': 'precise-updates/havana',
+    'havana/updates': 'precise-updates/havana',
     'precise-havana': 'precise-updates/havana',
     'precise-havana/updates': 'precise-updates/havana',
     'precise-updates/havana': 'precise-updates/havana',
@@ -59,6 +79,7 @@
     'precise-proposed/havana': 'precise-proposed/havana',
     # Icehouse
     'icehouse': 'precise-updates/icehouse',
+    'icehouse/updates': 'precise-updates/icehouse',
     'precise-icehouse': 'precise-updates/icehouse',
     'precise-icehouse/updates': 'precise-updates/icehouse',
     'precise-updates/icehouse': 'precise-updates/icehouse',
@@ -67,6 +88,7 @@
     'precise-proposed/icehouse': 'precise-proposed/icehouse',
     # Juno
     'juno': 'trusty-updates/juno',
+    'juno/updates': 'trusty-updates/juno',
     'trusty-juno': 'trusty-updates/juno',
     'trusty-juno/updates': 'trusty-updates/juno',
     'trusty-updates/juno': 'trusty-updates/juno',
@@ -75,6 +97,7 @@
     'trusty-proposed/juno': 'trusty-proposed/juno',
     # Kilo
     'kilo': 'trusty-updates/kilo',
+    'kilo/updates': 'trusty-updates/kilo',
     'trusty-kilo': 'trusty-updates/kilo',
     'trusty-kilo/updates': 'trusty-updates/kilo',
     'trusty-updates/kilo': 'trusty-updates/kilo',
@@ -83,6 +106,7 @@
     'trusty-proposed/kilo': 'trusty-proposed/kilo',
     # Liberty
     'liberty': 'trusty-updates/liberty',
+    'liberty/updates': 'trusty-updates/liberty',
     'trusty-liberty': 'trusty-updates/liberty',
     'trusty-liberty/updates': 'trusty-updates/liberty',
     'trusty-updates/liberty': 'trusty-updates/liberty',
@@ -91,6 +115,7 @@
     'trusty-proposed/liberty': 'trusty-proposed/liberty',
     # Mitaka
     'mitaka': 'trusty-updates/mitaka',
+    'mitaka/updates': 'trusty-updates/mitaka',
     'trusty-mitaka': 'trusty-updates/mitaka',
     'trusty-mitaka/updates': 'trusty-updates/mitaka',
     'trusty-updates/mitaka': 'trusty-updates/mitaka',
@@ -99,17 +124,44 @@
     'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
     # Newton
     'newton': 'xenial-updates/newton',
+    'newton/updates': 'xenial-updates/newton',
     'xenial-newton': 'xenial-updates/newton',
     'xenial-newton/updates': 'xenial-updates/newton',
     'xenial-updates/newton': 'xenial-updates/newton',
     'newton/proposed': 'xenial-proposed/newton',
     'xenial-newton/proposed': 'xenial-proposed/newton',
     'xenial-proposed/newton': 'xenial-proposed/newton',
+    # Ocata
+    'ocata': 'xenial-updates/ocata',
+    'ocata/updates': 'xenial-updates/ocata',
+    'xenial-ocata': 'xenial-updates/ocata',
+    'xenial-ocata/updates': 'xenial-updates/ocata',
+    'xenial-updates/ocata': 'xenial-updates/ocata',
+    'ocata/proposed': 'xenial-proposed/ocata',
+    'xenial-ocata/proposed': 'xenial-proposed/ocata',
+    'xenial-ocata/newton': 'xenial-proposed/ocata',
+    # Pike
+    'pike': 'xenial-updates/pike',
+    'xenial-pike': 'xenial-updates/pike',
+    'xenial-pike/updates': 'xenial-updates/pike',
+    'xenial-updates/pike': 'xenial-updates/pike',
+    'pike/proposed': 'xenial-proposed/pike',
+    'xenial-pike/proposed': 'xenial-proposed/pike',
+    'xenial-pike/newton': 'xenial-proposed/pike',
+    # Queens
+    'queens': 'xenial-updates/queens',
+    'xenial-queens': 'xenial-updates/queens',
+    'xenial-queens/updates': 'xenial-updates/queens',
+    'xenial-updates/queens': 'xenial-updates/queens',
+    'queens/proposed': 'xenial-proposed/queens',
+    'xenial-queens/proposed': 'xenial-proposed/queens',
+    'xenial-queens/newton': 'xenial-proposed/queens',
 }
 
+
 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
+CMD_RETRY_COUNT = 30  # Retry a failing fatal command X times.
 
 
 def filter_installed_packages(packages):
@@ -137,7 +189,7 @@
     return apt_pkg.Cache(progress)
 
 
-def install(packages, options=None, fatal=False):
+def apt_install(packages, options=None, fatal=False):
     """Install one or more packages."""
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -154,7 +206,7 @@
     _run_apt_command(cmd, fatal)
 
 
-def upgrade(options=None, fatal=False, dist=False):
+def apt_upgrade(options=None, fatal=False, dist=False):
     """Upgrade all packages."""
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -169,13 +221,13 @@
     _run_apt_command(cmd, fatal)
 
 
-def update(fatal=False):
+def apt_update(fatal=False):
     """Update local apt cache."""
     cmd = ['apt-get', 'update']
     _run_apt_command(cmd, fatal)
 
 
-def purge(packages, fatal=False):
+def apt_purge(packages, fatal=False):
     """Purge one or more packages."""
     cmd = ['apt-get', '--assume-yes', 'purge']
     if isinstance(packages, six.string_types):
@@ -209,7 +261,45 @@
     return apt_mark(packages, 'unhold', fatal=fatal)
 
 
-def add_source(source, key=None):
+def import_key(keyid):
+    """Import a key in either ASCII Armor or Radix64 format.
+
+    `keyid` is either the keyid to fetch from a PGP server, or
+    the key in ASCII armor format.
+
+    :param keyid: String of key (or key id).
+    :raises: GPGKeyError if the key could not be imported
+    """
+    key = keyid.strip()
+    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
+            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
+        log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        log("Importing ASCII Armor PGP key", level=DEBUG)
+        with NamedTemporaryFile() as keyfile:
+            with open(keyfile.name, 'w') as fd:
+                fd.write(key)
+                fd.write("\n")
+            cmd = ['apt-key', 'add', keyfile.name]
+            try:
+                subprocess.check_call(cmd)
+            except subprocess.CalledProcessError:
+                error = "Error importing PGP key '{}'".format(key)
+                log(error)
+                raise GPGKeyError(error)
+    else:
+        log("PGP key found (looks like Radix64 format)", level=DEBUG)
+        log("Importing PGP key from keyserver", level=DEBUG)
+        cmd = ['apt-key', 'adv', '--keyserver',
+               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError:
+            error = "Error importing PGP key '{}'".format(key)
+            log(error)
+            raise GPGKeyError(error)
+
+
+def add_source(source, key=None, fail_invalid=False):
     """Add a package source to this system.
 
     @param source: a URL or sources.list entry, as supported by
@@ -225,6 +315,33 @@
         such as 'cloud:icehouse'
         'distro' may be used as a noop
 
+    Full list of source specifications supported by the function are:
+
+    'distro': A NOP; i.e. it has no effect.
+    'proposed': the proposed deb spec [2] is written to
+      /etc/apt/sources.list.d/proposed.list
+    'distro-proposed': adds <version>-proposed to the debs [2]
+    'ppa:<ppa-name>': add-apt-repository --yes <ppa_name>
+    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
+    'http://....': add-apt-repository --yes http://...
+    'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec>
+    'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
+      optional staging version.  If staging is used then the staging PPA [2]
+      will be used.  If staging is NOT used then the cloud archive [3] will be
+      added, and the 'ubuntu-cloud-keyring' package will be added for the
+      current distro.
+
+    Otherwise the source is not recognised and this is logged to the juju log.
+    However, no error is raised, unless fail_invalid is True.
+
+    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+        where {} is replaced with the derived pocket name.
+    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
+        main universe multiverse restricted
+        where {} is replaced with the lsb_release codename (e.g. xenial)
+    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
+        to /etc/apt/sources.list.d/cloud-archive-list
+
     @param key: A key to be added to the system's APT keyring and used
     to verify the signatures on packages. Ideally, this should be an
     ASCII format GPG public key including the block headers. A GPG key
@@ -232,87 +349,202 @@
     available to retrieve the actual public key from a public keyserver
     placing your Juju environment at risk. ppa and cloud archive keys
     are securely added automtically, so sould not be provided.
+
+    @param fail_invalid: (boolean) if True, then the function raises a
+    SourceConfigError if there is no matching installation source.
+
+    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
+    valid pocket in CLOUD_ARCHIVE_POCKETS
     """
+    _mapping = OrderedDict([
+        (r"^distro$", lambda: None),  # This is a NOP
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^cloud:(.*)$", _add_cloud_pocket),
+    ])
     if source is None:
-        log('Source is not present. Skipping')
-        return
-
-    if (source.startswith('ppa:') or
-        source.startswith('http') or
-        source.startswith('deb ') or
-            source.startswith('cloud-archive:')):
-        subprocess.check_call(['add-apt-repository', '--yes', source])
-    elif source.startswith('cloud:'):
-        install(filter_installed_packages(['ubuntu-cloud-keyring']),
+        source = ''
+    for r, fn in six.iteritems(_mapping):
+        m = re.match(r, source)
+        if m:
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
+            if key:
+                try:
+                    import_key(key)
+                except GPGKeyError as e:
+                    raise SourceConfigError(str(e))
+            break
+    else:
+        # nothing matched.  log an error and maybe sys.exit
+        err = "Unknown source: {!r}".format(source)
+        log(err)
+        if fail_invalid:
+            raise SourceConfigError(err)
+
+
+def _add_proposed():
+    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list
+
+    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza for
+    the deb line.
+
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
+    """
+    release = lsb_release()['DISTRIB_CODENAME']
+    arch = platform.machine()
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
+        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
+                                .format(arch))
+    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
+
+
+def _add_apt_repository(spec):
+    """Add the spec using add_apt_repository
+
+    :param spec: the parameter to pass to add_apt_repository
+    """
+    _run_with_retries(['add-apt-repository', '--yes', spec])
+
+
+def _add_cloud_pocket(pocket):
+    """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list
+
+    Note that this overwrites the existing file if there is one.
+
+    This function also converts the simple pocket in to the actual pocket using
+    the CLOUD_ARCHIVE_POCKETS mapping.
+
+    :param pocket: string representing the pocket to add a deb spec for.
+    :raises: SourceConfigError if the cloud pocket doesn't exist or the
+        requested release doesn't match the current distro version.
+    """
+    apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                 fatal=True)
-        pocket = source.split(':')[-1]
-        if pocket not in CLOUD_ARCHIVE_POCKETS:
-            raise SourceConfigError(
-                'Unsupported cloud: source option %s' %
-                pocket)
-        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
-    elif source == 'proposed':
-        release = lsb_release()['DISTRIB_CODENAME']
-        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
-            apt.write(PROPOSED_POCKET.format(release))
-    elif source == 'distro':
-        pass
-    else:
-        log("Unknown source: {!r}".format(source))
-
-    if key:
-        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
-            with NamedTemporaryFile('w+') as key_file:
-                key_file.write(key)
-                key_file.flush()
-                key_file.seek(0)
-                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
-        else:
-            # Note that hkp: is in no way a secure protocol. Using a
-            # GPG key id is pointless from a security POV unless you
-            # absolutely trust your network and DNS.
-            subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                                   'hkp://keyserver.ubuntu.com:80', '--recv',
-                                   key])
+    if pocket not in CLOUD_ARCHIVE_POCKETS:
+        raise SourceConfigError(
+            'Unsupported cloud: source option %s' %
+            pocket)
+    actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
+    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+
+
+def _add_cloud_staging(cloud_archive_release, openstack_release):
+    """Add the cloud staging repository which is in
+    ppa:ubuntu-cloud-archive/<openstack_release>-staging
+
+    This function checks that the cloud_archive_release matches the current
+    codename for the distro that charm is being installed on.
+
+    :param cloud_archive_release: string, codename for the release.
+    :param openstack_release: String, codename for the openstack release.
+    :raises: SourceConfigError if the cloud_archive_release doesn't match the
+        current version of the os.
+    """
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
+    ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
+    cmd = 'add-apt-repository -y {}'.format(ppa)
+    _run_with_retries(cmd.split(' '))
+
+
+def _add_cloud_distro_check(cloud_archive_release, openstack_release):
+    """Add the cloud pocket, but also check the cloud_archive_release against
+    the current distro, and use the openstack_release as the full lookup.
+
+    This just calls _add_cloud_pocket() with the openstack_release as pocket
+    to get the correct cloud-archive.list for dpkg to work with.
+
+    :param cloud_archive_release:String, codename for the distro release.
+    :param openstack_release: String, spec for the release to look up in the
+        CLOUD_ARCHIVE_POCKETS
+    :raises: SourceConfigError if this is the wrong distro, or the pocket spec
+        doesn't exist.
+    """
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
+    _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release))
+
+
+def _verify_is_ubuntu_rel(release, os_release):
+    """Verify that the release is the same as the current Ubuntu release.
+
+    :param release: String, lowercase for the release.
+    :param os_release: String, the os_release being asked for
+    :raises: SourceConfigError if the release is not the same as the ubuntu
+        release.
+    """
+    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+    if release != ubuntu_rel:
+        raise SourceConfigError(
+            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
+            'version ({})'.format(release, os_release, ubuntu_rel))
+
+
+def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
+                      retry_message="", cmd_env=None):
+    """Run a command and retry until success or max_retries is reached.
+
+    :param: cmd: list: The command to run.
+    :param: max_retries: int: The number of retries to attempt on a fatal
+        command. Defaults to CMD_RETRY_COUNT.
+    :param: retry_exitcodes: tuple: Optional additional exit codes to retry.
+        Defaults to retry on exit code 1.
+    :param: retry_message: str: Optional log prefix emitted during retries.
+    :param: cmd_env: dict: Environment variables to add to the command run.
+    """
+
+    env = None
+    kwargs = {}
+    if cmd_env:
+        env = os.environ.copy()
+        env.update(cmd_env)
+        kwargs['env'] = env
+
+    if not retry_message:
+        retry_message = "Failed executing '{}'".format(" ".join(cmd))
+    retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
+
+    retry_count = 0
+    result = None
+
+    retry_results = (None,) + retry_exitcodes
+    while result in retry_results:
+        try:
+            # result = subprocess.check_call(cmd, env=env)
+            result = subprocess.check_call(cmd, **kwargs)
+        except subprocess.CalledProcessError as e:
+            retry_count = retry_count + 1
+            if retry_count > max_retries:
+                raise
+            result = e.returncode
+            log(retry_message)
+            time.sleep(CMD_RETRY_DELAY)
 
 
 def _run_apt_command(cmd, fatal=False):
-    """Run an APT command.
-
-    Checks the output and retries if the fatal flag is set
-    to True.
+    """Run an apt command with optional retries.
 
     :param: cmd: str: The apt command to run.
     :param: fatal: bool: Whether the command's output should be checked and
         retried.
     """
-    env = os.environ.copy()
-
-    if 'DEBIAN_FRONTEND' not in env:
-        env['DEBIAN_FRONTEND'] = 'noninteractive'
+    # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
+    cmd_env = {
+        'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
 
     if fatal:
-        retry_count = 0
-        result = None
-
-        # If the command is considered "fatal", we need to retry if the apt
-        # lock was not acquired.
-
-        while result is None or result == APT_NO_LOCK:
-            try:
-                result = subprocess.check_call(cmd, env=env)
-            except subprocess.CalledProcessError as e:
-                retry_count = retry_count + 1
-                if retry_count > APT_NO_LOCK_RETRY_COUNT:
-                    raise
-                result = e.returncode
-                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
-                    "".format(APT_NO_LOCK_RETRY_DELAY))
-                time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
+        _run_with_retries(
+            cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
+            retry_message="Couldn't acquire DPKG lock")
     else:
+        env = os.environ.copy()
+        env.update(cmd_env)
         subprocess.call(cmd, env=env)
 
 

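For reference, a rough usage sketch of the new _run_with_retries() helper (the import path is assumed to be charmhelpers.fetch.ubuntu, matching upstream charm-helpers, and the helper is private, so this is purely illustrative). It re-runs the command while it exits with one of the retryable codes, sleeping CMD_RETRY_DELAY seconds between attempts:

    from charmhelpers.fetch.ubuntu import APT_NO_LOCK, _run_with_retries

    # Retry 'apt-get update' on exit code 1 or on the "couldn't acquire the
    # dpkg lock" code, with a non-interactive frontend, mirroring what
    # _run_apt_command() now does for fatal commands.
    _run_with_retries(
        ['apt-get', '--assume-yes', 'update'],
        cmd_env={'DEBIAN_FRONTEND': 'noninteractive'},
        retry_exitcodes=(1, APT_NO_LOCK),
        retry_message="Couldn't acquire DPKG lock")
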
=== modified file 'hooks/charmhelpers/osplatform.py'
--- hooks/charmhelpers/osplatform.py	2016-10-26 18:19:59 +0000
+++ hooks/charmhelpers/osplatform.py	2017-06-16 22:43:26 +0000
@@ -8,12 +8,18 @@
     will be returned (which is the name of the module).
     This string is used to decide which platform module should be imported.
     """
+    # linux_distribution is deprecated and will be removed in Python 3.7
+    # Warnings are *not* disabled, as we certainly need to fix this.
     tuple_platform = platform.linux_distribution()
     current_platform = tuple_platform[0]
     if "Ubuntu" in current_platform:
         return "ubuntu"
     elif "CentOS" in current_platform:
         return "centos"
+    elif "debian" in current_platform:
+        # Stock Python does not detect Ubuntu and instead returns "debian",
+        # at least in some build environments such as Travis CI.
+        return "ubuntu"
     else:
         raise RuntimeError("This module is not supported on {}."
                            .format(current_platform))

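The new debian branch can be exercised with a quick sketch like the following (the mock target and tuple values are illustrative only):

    try:
        from unittest import mock  # Python 3
    except ImportError:
        import mock  # Python 2

    from charmhelpers import osplatform

    # Some environments (e.g. Travis CI) report Ubuntu as 'debian';
    # get_platform() should still map that onto the ubuntu platform module.
    with mock.patch('platform.linux_distribution',
                    return_value=('debian', 'stretch/sid', '')):
        assert osplatform.get_platform() == 'ubuntu'
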
=== modified file 'hooks/hooks.py'
--- hooks/hooks.py	2016-11-15 18:15:20 +0000
+++ hooks/hooks.py	2017-06-16 22:43:26 +0000
@@ -4,6 +4,8 @@
 import sys
 
 from charmhelpers.core.host import (
+    CompareHostReleases,
+    lsb_release,
     service_start,
     service_stop,
     service_restart,
@@ -69,6 +71,12 @@
         changed[config_key] = config_get(config_key)
         juju_log("Configuration key:%s set to value: %s" %
                  (config_key, changed[config_key]))
+
+
+    # allows templates to generate series-dependent configuration
+    lsb = lsb_release()
+    changed['series'] = CompareHostReleases(lsb['DISTRIB_CODENAME'])
+
     return changed
 
 

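The 'series' value is a CompareHostReleases instance rather than a plain string so that templates can compare codenames by release order instead of alphabetically, e.g. (assuming the bundled charmhelpers is importable):

    from charmhelpers.core.host import CompareHostReleases

    assert CompareHostReleases('trusty') <= 'trusty'      # upstart-era series
    assert not CompareHostReleases('xenial') <= 'trusty'  # systemd-era series
    # A plain string comparison would get newer releases wrong ('a' < 't'):
    assert not CompareHostReleases('artful') <= 'trusty'
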
=== modified file 'templates/rsyslog.conf'
--- templates/rsyslog.conf	2014-04-22 19:45:41 +0000
+++ templates/rsyslog.conf	2017-06-16 22:43:26 +0000
@@ -1,3 +1,8 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
 /var/log/syslog
 {
 	rotate {{syslog_rotate}}
@@ -7,7 +12,11 @@
 	delaycompress
 	compress
 	postrotate
+	{%- if series <= 'trusty' %}
 		reload rsyslog >/dev/null 2>&1 || true
+	{%- else %}
+		invoke-rc.d rsyslog rotate > /dev/null
+	{%- endif %}
 	endscript
 }
 
@@ -32,6 +41,10 @@
 	delaycompress
 	sharedscripts
 	postrotate
+	{%- if series <= 'trusty' %}
 		reload rsyslog >/dev/null 2>&1 || true
+	{%- else %}
+		invoke-rc.d rsyslog rotate > /dev/null
+	{%- endif %}
 	endscript
 }

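To see how the conditional renders on each series, here is a small sketch rendering just the postrotate block with jinja2 directly (the charm's own rendering path may differ):

    from jinja2 import Template
    from charmhelpers.core.host import CompareHostReleases

    postrotate = Template(
        "postrotate\n"
        "{%- if series <= 'trusty' %}\n"
        "\treload rsyslog >/dev/null 2>&1 || true\n"
        "{%- else %}\n"
        "\tinvoke-rc.d rsyslog rotate > /dev/null\n"
        "{%- endif %}\n"
        "endscript")

    # xenial and later get the sysvinit 'rotate' action ...
    print(postrotate.render(series=CompareHostReleases('xenial')))
    # ... while trusty and earlier keep the old 'reload rsyslog' command.
    print(postrotate.render(series=CompareHostReleases('trusty')))
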
=== modified file 'tox.ini'
--- tox.ini	2016-10-27 15:33:35 +0000
+++ tox.ini	2017-06-16 22:43:26 +0000
@@ -1,6 +1,6 @@
 [tox]
 skipsdist=True
-envlist = py34, py35
+envlist = py27, py34, py35
 skip_missing_interpreters = True
 
 [testenv]

=== modified file 'unit_tests/test_hooks.py'
--- unit_tests/test_hooks.py	2017-01-10 17:09:58 +0000
+++ unit_tests/test_hooks.py	2017-06-16 22:43:26 +0000
@@ -1,7 +1,8 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 import os
-import hooks
+import shutil
+import six
+import tempfile
 
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@xxxxxxxxxxxxx>'
 _HERE = os.path.abspath(os.path.dirname(__file__))
@@ -12,6 +13,7 @@
 except ImportError as ex:
     raise ImportError("Please install unittest and mock modules")
 
+import hooks
 
 TO_PATCH = [
     "apt_install",
@@ -28,7 +30,7 @@
 ]
 
 
-class HooksTestCase(unittest.TestCase):
+class BaseTestCase(unittest.TestCase):
 
     def setUp(self):
         unittest.TestCase.setUp(self)
@@ -37,6 +39,14 @@
         self.juju_log.return_value = True
         self.apt_install.return_value = True
         self.charm_dir.return_value = os.path.join(_HERE, '..')
+        self.tmpdir = tempfile.mkdtemp()
+        self.maxDiff = None
+
+    def tearDown(self):
+        try:
+            shutil.rmtree(self.tmpdir)
+        except OSError:
+            pass
 
     def patch(self, method):
         _m = mock.patch.object(hooks, method)
@@ -48,6 +58,8 @@
         for method in TO_PATCH:
             setattr(self, method, self.patch(method))
 
+class TestHooks(BaseTestCase):
+
     def test_install_hook(self):
         """Check if install hooks is correctly executed
         """
@@ -141,8 +153,16 @@
     def test_config_changed(self):
         """Check if config-changed is executed correctly"""
         _open = mock.mock_open(read_data=b'foo')
-
-        with mock.patch('builtins.open', _open, create=True):
+        lsb = hooks.lsb_release()
+
+        if six.PY2:
+            open_function = '__builtin__.open'
+        else:
+            open_function = 'builtins.open'
+
+        with mock.patch(open_function, _open, create=True) as mock_open, \
+                mock.patch.object(hooks, 'lsb_release') as mock_lsb:
+            mock_lsb.return_value = lsb
             hooks.config_changed()
 
         # I'm not quite sure why config_changed appears to be called twice but
@@ -154,12 +174,15 @@
                                    '60-aggregator.conf'), 'rb'),
             mock.call(os.path.join(hooks.DEFAULT_RSYSLOG_PATH,
                                    '60-aggregator.conf'), 'w'),
-            mock.call(os.path.join(hooks.get_template_dir(),
-                                   '70-forward.conf'), 'rb'),
-            mock.call(os.path.join(hooks.get_template_dir(),
-                                   '70-forward.conf'), 'rb'),
+
+            mock.call(os.path.join(hooks.get_template_dir(),
+                                   '70-forward.conf'), 'rb'),
+            mock.call(os.path.join(hooks.get_template_dir(),
+                                   '70-forward.conf'), 'rb'),
+
             mock.call(os.path.join(hooks.DEFAULT_RSYSLOG_PATH,
                                    '70-forward.conf'), 'w'),
+
             mock.call(os.path.join(hooks.get_template_dir(),
                                    'rsyslog.conf'), 'rb'),
             mock.call(os.path.join(hooks.get_template_dir(),
@@ -169,3 +192,38 @@
 
         self.assertEquals(sorted(_open.call_args_list), sorted(expected))
         self.service_restart.assert_called_once_with("rsyslog")
+
+
+class TestSeries(BaseTestCase):
+
+    def setUp(self):
+        super(TestSeries, self).setUp()
+        self._logrotate_path = hooks.DEFAULT_LOGROTATE_PATH
+        self._rsyslog_path = hooks.DEFAULT_RSYSLOG_PATH
+
+    def tearDown(self):
+        hooks.DEFAULT_LOGROTATE_PATH = self._logrotate_path
+        hooks.DEFAULT_RSYSLOG_PATH = self._rsyslog_path
+        super(TestSeries, self).tearDown()
+
+    def test_series(self):
+        rsyslog_config = os.path.join(self.tmpdir, 'rsyslog.conf')
+        hooks.DEFAULT_LOGROTATE_PATH = rsyslog_config
+        hooks.DEFAULT_RSYSLOG_PATH = self.tmpdir
+        lsb = hooks.lsb_release()
+        with mock.patch.object(hooks, 'lsb_release') as mock_lsb:
+            lsb['DISTRIB_CODENAME'] = 'artful'
+            mock_lsb.return_value = lsb
+            hooks.config_changed()
+
+            with open(rsyslog_config, 'r') as f:
+                content = f.read()
+                self.assertIn('invoke-rc.d rsyslog rotate > /dev/null',
+                              content)
+
+            lsb['DISTRIB_CODENAME'] = 'trusty'
+            hooks.config_changed()
+            with open(rsyslog_config, 'r') as f:
+                content = f.read()
+                self.assertIn('reload rsyslog >/dev/null 2>&1 || true',
+                              content)