← Back to team overview

nagios-charmers team mailing list archive

[Merge] ~xavpaice/nagios-charm:fix-lp1677580 into nagios-charm:master

 

Xav Paice has proposed merging ~xavpaice/nagios-charm:fix-lp1677580 into nagios-charm:master.

Requested reviews:
  Nagios Charm developers (nagios-charmers)

For more details, see:
https://code.launchpad.net/~xavpaice/nagios-charm/+git/nagios-charm/+merge/329234
-- 
Your team Nagios Charm developers is requested to review the proposed merge of ~xavpaice/nagios-charm:fix-lp1677580 into nagios-charm:master.
diff --git a/Makefile b/Makefile
index c75b2e9..9d48829 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,7 @@
+#!/usr/bin/make
+PYTHON := /usr/bin/python3
+export PYTHONPATH := hooks
+
 default:
 	echo Nothing to do
 
@@ -12,3 +16,13 @@ test:
 	tests/22-extraconfig-test
 	tests/23-livestatus-test
 	tests/24-pagerduty-test
+
+bin/charm_helpers_sync.py:
+	@mkdir -p bin
+	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+        > bin/charm_helpers_sync.py
+
+sync: bin/charm_helpers_sync.py
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
+
+
diff --git a/bin/charm_helpers_sync.py b/bin/charm_helpers_sync.py
new file mode 100644
index 0000000..bd79460
--- /dev/null
+++ b/bin/charm_helpers_sync.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Authors:
+#   Adam Gandelman <adamg@xxxxxxxxxx>
+
+import logging
+import optparse
+import os
+import subprocess
+import shutil
+import sys
+import tempfile
+import yaml
+from fnmatch import fnmatch
+
+import six
+
+CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
+
+
+def parse_config(conf_file):
+    if not os.path.isfile(conf_file):
+        logging.error('Invalid config file: %s.' % conf_file)
+        return False
+    return yaml.load(open(conf_file).read())
+
+
+def clone_helpers(work_dir, branch):
+    dest = os.path.join(work_dir, 'charm-helpers')
+    logging.info('Checking out %s to %s.' % (branch, dest))
+    cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
+    subprocess.check_call(cmd)
+    return dest
+
+
+def _module_path(module):
+    return os.path.join(*module.split('.'))
+
+
+def _src_path(src, module):
+    return os.path.join(src, 'charmhelpers', _module_path(module))
+
+
+def _dest_path(dest, module):
+    return os.path.join(dest, _module_path(module))
+
+
+def _is_pyfile(path):
+    return os.path.isfile(path + '.py')
+
+
+def ensure_init(path):
+    '''
+    ensure directories leading up to path are importable, omitting
+    parent directory, eg path='/hooks/helpers/foo'/:
+        hooks/
+        hooks/helpers/__init__.py
+        hooks/helpers/foo/__init__.py
+    '''
+    for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
+        _i = os.path.join(d, '__init__.py')
+        if not os.path.exists(_i):
+            logging.info('Adding missing __init__.py: %s' % _i)
+            open(_i, 'wb').close()
+
+
+def sync_pyfile(src, dest):
+    src = src + '.py'
+    src_dir = os.path.dirname(src)
+    logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
+    if not os.path.exists(dest):
+        os.makedirs(dest)
+    shutil.copy(src, dest)
+    if os.path.isfile(os.path.join(src_dir, '__init__.py')):
+        shutil.copy(os.path.join(src_dir, '__init__.py'),
+                    dest)
+    ensure_init(dest)
+
+
+def get_filter(opts=None):
+    opts = opts or []
+    if 'inc=*' in opts:
+        # do not filter any files, include everything
+        return None
+
+    def _filter(dir, ls):
+        incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
+        _filter = []
+        for f in ls:
+            _f = os.path.join(dir, f)
+
+            if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
+                if True not in [fnmatch(_f, inc) for inc in incs]:
+                    logging.debug('Not syncing %s, does not match include '
+                                  'filters (%s)' % (_f, incs))
+                    _filter.append(f)
+                else:
+                    logging.debug('Including file, which matches include '
+                                  'filters (%s): %s' % (incs, _f))
+            elif (os.path.isfile(_f) and not _f.endswith('.py')):
+                logging.debug('Not syncing file: %s' % f)
+                _filter.append(f)
+            elif (os.path.isdir(_f) and not
+                  os.path.isfile(os.path.join(_f, '__init__.py'))):
+                logging.debug('Not syncing directory: %s' % f)
+                _filter.append(f)
+        return _filter
+    return _filter
+
+
+def sync_directory(src, dest, opts=None):
+    if os.path.exists(dest):
+        logging.debug('Removing existing directory: %s' % dest)
+        shutil.rmtree(dest)
+    logging.info('Syncing directory: %s -> %s.' % (src, dest))
+
+    shutil.copytree(src, dest, ignore=get_filter(opts))
+    ensure_init(dest)
+
+
+def sync(src, dest, module, opts=None):
+
+    # Sync charmhelpers/__init__.py for bootstrap code.
+    sync_pyfile(_src_path(src, '__init__'), dest)
+
+    # Sync other __init__.py files in the path leading to module.
+    m = []
+    steps = module.split('.')[:-1]
+    while steps:
+        m.append(steps.pop(0))
+        init = '.'.join(m + ['__init__'])
+        sync_pyfile(_src_path(src, init),
+                    os.path.dirname(_dest_path(dest, init)))
+
+    # Sync the module, or maybe a .py file.
+    if os.path.isdir(_src_path(src, module)):
+        sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
+    elif _is_pyfile(_src_path(src, module)):
+        sync_pyfile(_src_path(src, module),
+                    os.path.dirname(_dest_path(dest, module)))
+    else:
+        logging.warn('Could not sync: %s. Neither a pyfile or directory, '
+                     'does it even exist?' % module)
+
+
+def parse_sync_options(options):
+    if not options:
+        return []
+    return options.split(',')
+
+
+def extract_options(inc, global_options=None):
+    global_options = global_options or []
+    if global_options and isinstance(global_options, six.string_types):
+        global_options = [global_options]
+    if '|' not in inc:
+        return (inc, global_options)
+    inc, opts = inc.split('|')
+    return (inc, parse_sync_options(opts) + global_options)
+
+
+def sync_helpers(include, src, dest, options=None):
+    if not os.path.isdir(dest):
+        os.makedirs(dest)
+
+    global_options = parse_sync_options(options)
+
+    for inc in include:
+        if isinstance(inc, str):
+            inc, opts = extract_options(inc, global_options)
+            sync(src, dest, inc, opts)
+        elif isinstance(inc, dict):
+            # could also do nested dicts here.
+            for k, v in six.iteritems(inc):
+                if isinstance(v, list):
+                    for m in v:
+                        inc, opts = extract_options(m, global_options)
+                        sync(src, dest, '%s.%s' % (k, inc), opts)
+
+
+if __name__ == '__main__':
+    parser = optparse.OptionParser()
+    parser.add_option('-c', '--config', action='store', dest='config',
+                      default=None, help='helper config file')
+    parser.add_option('-D', '--debug', action='store_true', dest='debug',
+                      default=False, help='debug')
+    parser.add_option('-b', '--branch', action='store', dest='branch',
+                      help='charm-helpers bzr branch (overrides config)')
+    parser.add_option('-d', '--destination', action='store', dest='dest_dir',
+                      help='sync destination dir (overrides config)')
+    (opts, args) = parser.parse_args()
+
+    if opts.debug:
+        logging.basicConfig(level=logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.INFO)
+
+    if opts.config:
+        logging.info('Loading charm helper config from %s.' % opts.config)
+        config = parse_config(opts.config)
+        if not config:
+            logging.error('Could not parse config from %s.' % opts.config)
+            sys.exit(1)
+    else:
+        config = {}
+
+    if 'branch' not in config:
+        config['branch'] = CHARM_HELPERS_BRANCH
+    if opts.branch:
+        config['branch'] = opts.branch
+    if opts.dest_dir:
+        config['destination'] = opts.dest_dir
+
+    if 'destination' not in config:
+        logging.error('No destination dir. specified as option or config.')
+        sys.exit(1)
+
+    if 'include' not in config:
+        if not args:
+            logging.error('No modules to sync specified as option or config.')
+            sys.exit(1)
+        config['include'] = []
+        [config['include'].append(a) for a in args]
+
+    sync_options = None
+    if 'options' in config:
+        sync_options = config['options']
+    tmpd = tempfile.mkdtemp()
+    try:
+        checkout = clone_helpers(tmpd, config['branch'])
+        sync_helpers(config['include'], checkout, config['destination'],
+                     options=sync_options)
+    except Exception as e:
+        logging.error("Could not sync: %s" % e)
+        raise e
+    finally:
+        logging.debug('Cleaning up %s' % tmpd)
+        shutil.rmtree(tmpd)
diff --git a/charm-helpers.yaml b/charm-helpers.yaml
index 4c97181..e5f7760 100644
--- a/charm-helpers.yaml
+++ b/charm-helpers.yaml
@@ -1,6 +1,7 @@
 destination: hooks/charmhelpers
-branch: lp:~openstack-charmers/charm-helpers/ssl-everywhere
+branch: lp:charm-helpers
 include:
     - core
     - fetch
+    - osplatform
     - contrib.ssl
diff --git a/config.yaml b/config.yaml
index 3eb834a..ca60d58 100644
--- a/config.yaml
+++ b/config.yaml
@@ -144,3 +144,24 @@ options:
             Password to use for Nagios administrative access.  If not
             provided, a password will be generated (see documentation for
             instructions on retrieving the generated password.)
+    monitor_self:
+        type: boolean
+        default: true
+        description: |
+            If true, enable monitoring of the nagios unit itself.
+    nagios_host_context:
+        default: "juju"
+        type: string
+        description: |
+            a string that will be prepended to instance name to set the host name
+            in nagios. So for instance the hostname would be something like:
+                juju-postgresql-0
+            If you're running multiple environments with the same services in them
+            this allows you to differentiate between them.
+    load_monitor:
+        default: '5.0!4.0!3.0!10.0!6.0!4.0'
+        type: string
+        description: |
+            A string to pass to the Nagios load monitoring command.  Default is
+            to warn at 1, 5, 15 min load averages of 5.0, 4.0 and 3.0 and to
+            report Critical at averages of 10.0, 6.0 and 4.0.
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
index e69de29..e7aa471 100644
--- a/hooks/charmhelpers/__init__.py
+++ b/hooks/charmhelpers/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
+import subprocess
+import sys
+
+try:
+    import six  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # flake8: noqa
+
+try:
+    import yaml  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # flake8: noqa
+
+
+# Holds a list of mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below.  This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+    The date, which is a string in semi-ISO8601 format, indicates the year-month
+    that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning:  String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format to indicate when the
+                 function will definitely (probably) be removed.
+    :param log: The log function to call to log.  If not, logs to stdout
+    """
+    def wrap(f):
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            try:
+                module = inspect.getmodule(f)
+                file = inspect.getsourcefile(f)
+                lines = inspect.getsourcelines(f)
+                f_name = "{}-{}-{}..{}-{}".format(
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
+            except (IOError, TypeError):
+                # assume it was local, so just use the name of the function
+                f_name = f.__name__
+            if f_name not in __deprecated_functions:
+                __deprecated_functions[f_name] = True
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
+                    f.__name__)
+                if date:
+                    s = "{} on/around {}".format(s, date)
+                if warning:
+                    s = "{} : {}".format(s, warning)
+                if log:
+                    log(s)
+                else:
+                    print(s)
+            return f(*args, **kwargs)
+        return wrapped_f
+    return wrap
diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py
index e69de29..d7567b8 100644
--- a/hooks/charmhelpers/contrib/__init__.py
+++ b/hooks/charmhelpers/contrib/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/ssl/__init__.py b/hooks/charmhelpers/contrib/ssl/__init__.py
index 2999c0a..1d238b5 100644
--- a/hooks/charmhelpers/contrib/ssl/__init__.py
+++ b/hooks/charmhelpers/contrib/ssl/__init__.py
@@ -1,3 +1,17 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import subprocess
 from charmhelpers.core import hookenv
 
@@ -74,5 +88,5 @@ def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=
         subprocess.check_call(cmd)
         return True
     except Exception as e:
-        print "Execution of openssl command failed:\n{}".format(e)
+        print("Execution of openssl command failed:\n{}".format(e))
         return False
diff --git a/hooks/charmhelpers/contrib/ssl/service.py b/hooks/charmhelpers/contrib/ssl/service.py
index 295f721..06b534f 100644
--- a/hooks/charmhelpers/contrib/ssl/service.py
+++ b/hooks/charmhelpers/contrib/ssl/service.py
@@ -1,13 +1,23 @@
-import logging
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import os
 from os.path import join as path_join
 from os.path import exists
 import subprocess
 
-
-log = logging.getLogger("service_ca")
-
-logging.basicConfig(level=logging.DEBUG)
+from charmhelpers.core.hookenv import log, DEBUG
 
 STD_CERT = "standard"
 
@@ -46,7 +56,7 @@ class ServiceCA(object):
     ###############
 
     def init(self):
-        log.debug("initializing service ca")
+        log("initializing service ca", level=DEBUG)
         if not exists(self.ca_dir):
             self._init_ca_dir(self.ca_dir)
             self._init_ca()
@@ -75,23 +85,23 @@ class ServiceCA(object):
                 os.mkdir(sd)
 
         if not exists(path_join(ca_dir, 'serial')):
-            with open(path_join(ca_dir, 'serial'), 'wb') as fh:
+            with open(path_join(ca_dir, 'serial'), 'w') as fh:
                 fh.write('02\n')
 
         if not exists(path_join(ca_dir, 'index.txt')):
-            with open(path_join(ca_dir, 'index.txt'), 'wb') as fh:
+            with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
                 fh.write('')
 
     def _init_ca(self):
         """Generate the root ca's cert and key.
         """
         if not exists(path_join(self.ca_dir, 'ca.cnf')):
-            with open(path_join(self.ca_dir, 'ca.cnf'), 'wb') as fh:
+            with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
                 fh.write(
                     CA_CONF_TEMPLATE % (self.get_conf_variables()))
 
         if not exists(path_join(self.ca_dir, 'signing.cnf')):
-            with open(path_join(self.ca_dir, 'signing.cnf'), 'wb') as fh:
+            with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
                 fh.write(
                     SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
 
@@ -103,7 +113,7 @@ class ServiceCA(object):
                '-keyout', self.ca_key, '-out', self.ca_cert,
                '-outform', 'PEM']
         output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-        log.debug("CA Init:\n %s", output)
+        log("CA Init:\n %s" % output, level=DEBUG)
 
     def get_conf_variables(self):
         return dict(
@@ -127,7 +137,7 @@ class ServiceCA(object):
         return self.get_certificate(common_name)
 
     def get_certificate(self, common_name):
-        if not common_name in self:
+        if common_name not in self:
             raise ValueError("No certificate for %s" % common_name)
         key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
         crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
@@ -147,15 +157,15 @@ class ServiceCA(object):
         subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
             template_vars)
 
-        log.debug("CA Create Cert %s", common_name)
+        log("CA Create Cert %s" % common_name, level=DEBUG)
         cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
                '-nodes', '-days', self.default_expiry,
                '-keyout', key_p, '-out', csr_p, '-subj', subj]
-        subprocess.check_call(cmd)
+        subprocess.check_call(cmd, stderr=subprocess.PIPE)
         cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
-        subprocess.check_call(cmd)
+        subprocess.check_call(cmd, stderr=subprocess.PIPE)
 
-        log.debug("CA Sign Cert %s", common_name)
+        log("CA Sign Cert %s" % common_name, level=DEBUG)
         if self.cert_type == MYSQL_CERT:
             cmd = ['openssl', 'x509', '-req',
                    '-in', csr_p, '-days', self.default_expiry,
@@ -166,8 +176,8 @@ class ServiceCA(object):
                    '-extensions', 'req_extensions',
                    '-days', self.default_expiry, '-notext',
                    '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
-        log.debug("running %s", " ".join(cmd))
-        subprocess.check_call(cmd)
+        log("running %s" % " ".join(cmd), level=DEBUG)
+        subprocess.check_call(cmd, stderr=subprocess.PIPE)
 
     def get_ca_bundle(self):
         with open(self.ca_cert) as fh:
diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py
index e69de29..d7567b8 100644
--- a/hooks/charmhelpers/core/__init__.py
+++ b/hooks/charmhelpers/core/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py
new file mode 100644
index 0000000..6ad41ee
--- /dev/null
+++ b/hooks/charmhelpers/core/decorators.py
@@ -0,0 +1,55 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+#  Edward Hope-Morley <opentastic@xxxxxxxxx>
+#
+
+import time
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+
+
+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
+    """If the decorated function raises exception exc_type, allow num_retries
+    retry attempts before raising the exception.
+    """
+    def _retry_on_exception_inner_1(f):
+        def _retry_on_exception_inner_2(*args, **kwargs):
+            retries = num_retries
+            multiplier = 1
+            while True:
+                try:
+                    return f(*args, **kwargs)
+                except exc_type:
+                    if not retries:
+                        raise
+
+                delay = base_delay * multiplier
+                multiplier += 1
+                log("Retrying '%s' %d more times (delay=%s)" %
+                    (f.__name__, retries, delay), level=INFO)
+                retries -= 1
+                if delay:
+                    time.sleep(delay)
+
+        return _retry_on_exception_inner_2
+
+    return _retry_on_exception_inner_1
diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py
new file mode 100644
index 0000000..fdd82b7
--- /dev/null
+++ b/hooks/charmhelpers/core/files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'Jorge Niedbalski <niedbalski@xxxxxxxxxx>'
+
+import os
+import subprocess
+
+
+def sed(filename, before, after, flags='g'):
+    """
+    Search and replaces the given pattern on filename.
+
+    :param filename: relative or absolute file path.
+    :param before: expression to be replaced (see 'man sed')
+    :param after: expression to replace with (see 'man sed')
+    :param flags: sed-compatible regex flags in example, to make
+    the  search and replace case insensitive, specify ``flags="i"``.
+    The ``g`` flag is always specified regardless, so you do not
+    need to remember to include it when overriding this parameter.
+    :returns: If the sed command exit code was zero then return,
+    otherwise raise CalledProcessError.
+    """
+    expression = r's/{0}/{1}/{2}'.format(before,
+                                         after, flags)
+
+    return subprocess.check_call(["sed", "-i", "-r", "-e",
+                                  expression,
+                                  os.path.expanduser(filename)])
diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py
new file mode 100644
index 0000000..d9fa915
--- /dev/null
+++ b/hooks/charmhelpers/core/fstab.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@xxxxxxxxxxxxx>'
+
+
+class Fstab(io.FileIO):
+    """This class extends file in order to implement a file reader/writer
+    for file `/etc/fstab`
+    """
+
+    class Entry(object):
+        """Entry class represents a non-comment line on the `/etc/fstab` file
+        """
+        def __init__(self, device, mountpoint, filesystem,
+                     options, d=0, p=0):
+            self.device = device
+            self.mountpoint = mountpoint
+            self.filesystem = filesystem
+
+            if not options:
+                options = "defaults"
+
+            self.options = options
+            self.d = int(d)
+            self.p = int(p)
+
+        def __eq__(self, o):
+            return str(self) == str(o)
+
+        def __str__(self):
+            return "{} {} {} {} {} {}".format(self.device,
+                                              self.mountpoint,
+                                              self.filesystem,
+                                              self.options,
+                                              self.d,
+                                              self.p)
+
+    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
+
+    def __init__(self, path=None):
+        if path:
+            self._path = path
+        else:
+            self._path = self.DEFAULT_PATH
+        super(Fstab, self).__init__(self._path, 'rb+')
+
+    def _hydrate_entry(self, line):
+        # NOTE: use split with no arguments to split on any
+        #       whitespace including tabs
+        return Fstab.Entry(*filter(
+            lambda x: x not in ('', None),
+            line.strip("\n").split()))
+
+    @property
+    def entries(self):
+        self.seek(0)
+        for line in self.readlines():
+            line = line.decode('us-ascii')
+            try:
+                if line.strip() and not line.strip().startswith("#"):
+                    yield self._hydrate_entry(line)
+            except ValueError:
+                pass
+
+    def get_entry_by_attr(self, attr, value):
+        for entry in self.entries:
+            e_attr = getattr(entry, attr)
+            if e_attr == value:
+                return entry
+        return None
+
+    def add_entry(self, entry):
+        if self.get_entry_by_attr('device', entry.device):
+            return False
+
+        self.write((str(entry) + '\n').encode('us-ascii'))
+        self.truncate()
+        return entry
+
+    def remove_entry(self, entry):
+        self.seek(0)
+
+        lines = [l.decode('us-ascii') for l in self.readlines()]
+
+        found = False
+        for index, line in enumerate(lines):
+            if line.strip() and not line.strip().startswith("#"):
+                if self._hydrate_entry(line) == entry:
+                    found = True
+                    break
+
+        if not found:
+            return False
+
+        lines.remove(line)
+
+        self.seek(0)
+        self.write(''.join(lines).encode('us-ascii'))
+        self.truncate()
+        return True
+
+    @classmethod
+    def remove_by_mountpoint(cls, mountpoint, path=None):
+        fstab = cls(path=path)
+        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
+        if entry:
+            return fstab.remove_entry(entry)
+        return False
+
+    @classmethod
+    def add(cls, device, mountpoint, filesystem, options=None, path=None):
+        return cls(path=path).add_entry(Fstab.Entry(device,
+                                                    mountpoint, filesystem,
+                                                    options=options))
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 505c202..12f37b2 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -1,22 +1,49 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 "Interactions with the Juju environment"
 # Copyright 2013 Canonical Ltd.
 #
 # Authors:
 #  Charm Helpers Developers <juju@xxxxxxxxxxxxxxxx>
 
+from __future__ import print_function
+import copy
+from distutils.version import LooseVersion
+from functools import wraps
+import glob
 import os
 import json
 import yaml
 import subprocess
 import sys
-import UserDict
+import errno
+import tempfile
 from subprocess import CalledProcessError
 
+import six
+if not six.PY3:
+    from UserDict import UserDict
+else:
+    from collections import UserDict
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
 INFO = "INFO"
 DEBUG = "DEBUG"
+TRACE = "TRACE"
 MARKER = object()
 
 cache = {}
@@ -25,7 +52,7 @@ cache = {}
 def cached(func):
     """Cache return values for multiple executions of func + args
 
-    For example:
+    For example::
 
         @cached
         def unit_get(attribute):
@@ -35,15 +62,18 @@ def cached(func):
 
     will cache the result of unit_get + 'test' for future calls.
     """
+    @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
         key = str((func, args, kwargs))
         try:
             return cache[key]
         except KeyError:
-            res = func(*args, **kwargs)
-            cache[key] = res
-            return res
+            pass  # Drop out of the exception handler scope.
+        res = func(*args, **kwargs)
+        cache[key] = res
+        return res
+    wrapper._wrapped = func
     return wrapper
 
 
@@ -63,16 +93,29 @@ def log(message, level=None):
     command = ['juju-log']
     if level:
         command += ['-l', level]
+    if not isinstance(message, six.string_types):
+        message = repr(message)
     command += [message]
-    subprocess.call(command)
+    # Missing juju-log should not cause failures in unit tests
+    # Send log output to stderr
+    try:
+        subprocess.call(command)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            if level:
+                message = "{}: {}".format(level, message)
+            message = "juju-log: {}".format(message)
+            print(message, file=sys.stderr)
+        else:
+            raise
 
 
-class Serializable(UserDict.IterableUserDict):
+class Serializable(UserDict):
     """Wrapper, an object that can be serialized to yaml or json"""
 
     def __init__(self, obj):
         # wrap the object
-        UserDict.IterableUserDict.__init__(self)
+        UserDict.__init__(self)
         self.data = obj
 
     def __getattr__(self, attr):
@@ -130,9 +173,19 @@ def relation_type():
     return os.environ.get('JUJU_RELATION', None)
 
 
-def relation_id():
-    """The relation ID for the current relation hook"""
-    return os.environ.get('JUJU_RELATION_ID', None)
+@cached
+def relation_id(relation_name=None, service_or_unit=None):
+    """The relation ID for the current or a specified relation"""
+    if not relation_name and not service_or_unit:
+        return os.environ.get('JUJU_RELATION_ID', None)
+    elif relation_name and service_or_unit:
+        service_name = service_or_unit.split('/')[0]
+        for relid in relation_ids(relation_name):
+            remote_service = remote_service_name(relid)
+            if remote_service == service_name:
+                return relid
+    else:
+        raise ValueError('Must specify neither or both of relation_name and service_or_unit')
 
 
 def local_unit():
@@ -142,7 +195,7 @@ def local_unit():
 
 def remote_unit():
     """The remote unit for the current relation hook"""
-    return os.environ['JUJU_REMOTE_UNIT']
+    return os.environ.get('JUJU_REMOTE_UNIT', None)
 
 
 def service_name():
@@ -150,9 +203,149 @@ def service_name():
     return local_unit().split('/')[0]
 
 
+def principal_unit():
+    """Returns the principal unit of this unit, otherwise None"""
+    # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
+    principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
+    # If it's empty, then this unit is the principal
+    if principal_unit == '':
+        return os.environ['JUJU_UNIT_NAME']
+    elif principal_unit is not None:
+        return principal_unit
+    # For Juju 2.1 and below, let's try to work out the principal unit by
+    # the various charms' metadata.yaml.
+    for reltype in relation_types():
+        for rid in relation_ids(reltype):
+            for unit in related_units(rid):
+                md = _metadata_unit(unit)
+                subordinate = md.pop('subordinate', None)
+                if not subordinate:
+                    return unit
+    return None
+
+
+@cached
+def remote_service_name(relid=None):
+    """The remote service name for a given relation-id (or the current relation)"""
+    if relid is None:
+        unit = remote_unit()
+    else:
+        units = related_units(relid)
+        unit = units[0] if units else None
+    return unit.split('/')[0] if unit else None
+
+
 def hook_name():
     """The name of the currently executing hook"""
-    return os.path.basename(sys.argv[0])
+    return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
+
+
+class Config(dict):
+    """A dictionary representation of the charm's config.yaml, with some
+    extra features:
+
+    - See which values in the dictionary have changed since the previous hook.
+    - For values that have changed, see what the previous value was.
+    - Store arbitrary data for use in a later hook.
+
+    NOTE: Do not instantiate this object directly - instead call
+    ``hookenv.config()``, which will return an instance of :class:`Config`.
+
+    Example usage::
+
+        >>> # inside a hook
+        >>> from charmhelpers.core import hookenv
+        >>> config = hookenv.config()
+        >>> config['foo']
+        'bar'
+        >>> # store a new key/value for later use
+        >>> config['mykey'] = 'myval'
+
+
+        >>> # user runs `juju set mycharm foo=baz`
+        >>> # now we're inside subsequent config-changed hook
+        >>> config = hookenv.config()
+        >>> config['foo']
+        'baz'
+        >>> # test to see if this val has changed since last hook
+        >>> config.changed('foo')
+        True
+        >>> # what was the previous value?
+        >>> config.previous('foo')
+        'bar'
+        >>> # keys/values that we add are preserved across hooks
+        >>> config['mykey']
+        'myval'
+
+    """
+    CONFIG_FILE_NAME = '.juju-persistent-config'
+
+    def __init__(self, *args, **kw):
+        super(Config, self).__init__(*args, **kw)
+        self.implicit_save = True
+        self._prev_dict = None
+        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
+        if os.path.exists(self.path):
+            self.load_previous()
+        atexit(self._implicit_save)
+
+    def load_previous(self, path=None):
+        """Load previous copy of config from disk.
+
+        In normal usage you don't need to call this method directly - it
+        is called automatically at object initialization.
+
+        :param path:
+
+            File path from which to load the previous config. If `None`,
+            config is loaded from the default location. If `path` is
+            specified, subsequent `save()` calls will write to the same
+            path.
+
+        """
+        self.path = path or self.path
+        with open(self.path) as f:
+            self._prev_dict = json.load(f)
+        for k, v in copy.deepcopy(self._prev_dict).items():
+            if k not in self:
+                self[k] = v
+
+    def changed(self, key):
+        """Return True if the current value for this key is different from
+        the previous value.
+
+        """
+        if self._prev_dict is None:
+            return True
+        return self.previous(key) != self.get(key)
+
+    def previous(self, key):
+        """Return previous value for this key, or None if there
+        is no previous value.
+
+        """
+        if self._prev_dict:
+            return self._prev_dict.get(key)
+        return None
+
+    def save(self):
+        """Save this config to disk.
+
+        If the charm is using the :mod:`Services Framework <services.base>`
+        or :meth:'@hook <Hooks.hook>' decorator, this
+        is called automatically at the end of successful hook execution.
+        Otherwise, it should be called directly by user code.
+
+        To disable automatic saves, set ``implicit_save=False`` on this
+        instance.
+
+        """
+        with open(self.path, 'w') as f:
+            json.dump(self, f)
+
+    def _implicit_save(self):
+        if self.implicit_save:
+            self.save()
 
 
 @cached
@@ -161,9 +354,15 @@ def config(scope=None):
     config_cmd_line = ['config-get']
     if scope is not None:
         config_cmd_line.append(scope)
+    else:
+        config_cmd_line.append('--all')
     config_cmd_line.append('--format=json')
     try:
-        return json.loads(subprocess.check_output(config_cmd_line))
+        config_data = json.loads(
+            subprocess.check_output(config_cmd_line).decode('UTF-8'))
+        if scope is not None:
+            return config_data
+        return Config(config_data)
     except ValueError:
         return None
 
@@ -179,30 +378,62 @@ def relation_get(attribute=None, unit=None, rid=None):
     if unit:
         _args.append(unit)
     try:
-        return json.loads(subprocess.check_output(_args))
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
     except ValueError:
         return None
-    except CalledProcessError, e:
+    except CalledProcessError as e:
         if e.returncode == 2:
             return None
         raise
 
 
-def relation_set(relation_id=None, relation_settings={}, **kwargs):
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
     """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
     relation_cmd_line = ['relation-set']
+    accepts_file = "--file" in subprocess.check_output(
+        relation_cmd_line + ["--help"], universal_newlines=True)
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
-    for k, v in (relation_settings.items() + kwargs.items()):
-        if v is None:
-            relation_cmd_line.append('{}='.format(k))
-        else:
-            relation_cmd_line.append('{}={}'.format(k, v))
-    subprocess.check_call(relation_cmd_line)
+    settings = relation_settings.copy()
+    settings.update(kwargs)
+    for key, value in settings.items():
+        # Force value to be a string: it always should, but some call
+        # sites pass in things like dicts or numbers.
+        if value is not None:
+            settings[key] = "{}".format(value)
+    if accepts_file:
+        # --file was introduced in Juju 1.23.2. Use it by default if
+        # available, since otherwise we'll break if the relation data is
+        # too big. Ideally we should tell relation-set to read the data from
+        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
+        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+        subprocess.check_call(
+            relation_cmd_line + ["--file", settings_file.name])
+        os.remove(settings_file.name)
+    else:
+        for key, value in settings.items():
+            if value is None:
+                relation_cmd_line.append('{}='.format(key))
+            else:
+                relation_cmd_line.append('{}={}'.format(key, value))
+        subprocess.check_call(relation_cmd_line)
     # Flush cache of any relation-gets for local unit
     flush(local_unit())
 
 
+def relation_clear(r_id=None):
+    ''' Clears any relation data already set on relation r_id '''
+    settings = relation_get(rid=r_id,
+                            unit=local_unit())
+    for setting in settings:
+        if setting not in ['public-address', 'private-address']:
+            settings[setting] = None
+    relation_set(relation_id=r_id,
+                 **settings)
+
+
 @cached
 def relation_ids(reltype=None):
     """A list of relation_ids"""
@@ -210,7 +441,8 @@ def relation_ids(reltype=None):
     relid_cmd_line = ['relation-ids', '--format=json']
     if reltype is not None:
         relid_cmd_line.append(reltype)
-        return json.loads(subprocess.check_output(relid_cmd_line)) or []
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
     return []
 
 
@@ -221,7 +453,8 @@ def related_units(relid=None):
     units_cmd_line = ['relation-list', '--format=json']
     if relid is not None:
         units_cmd_line.extend(('-r', relid))
-    return json.loads(subprocess.check_output(units_cmd_line)) or []
+    return json.loads(
+        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
 
 
 @cached
@@ -261,21 +494,116 @@ def relations_of_type(reltype=None):
 
 
 @cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
+def _metadata_unit(unit):
+    """Given the name of a unit (e.g. apache2/0), get the unit charm's
+    metadata.yaml. Very similar to metadata() but allows us to inspect
+    other units. Unit needs to be co-located, such as a subordinate or
+    principal/primary.
+
+    :returns: metadata.yaml as a python object.
+
+    """
+    basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
+    unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
+    with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
+@cached
 def relation_types():
     """Get a list of relation types supported by this charm"""
-    charmdir = os.environ.get('CHARM_DIR', '')
-    mdf = open(os.path.join(charmdir, 'metadata.yaml'))
-    md = yaml.safe_load(mdf)
     rel_types = []
+    md = metadata()
     for key in ('provides', 'requires', 'peers'):
         section = md.get(key)
         if section:
             rel_types.extend(section.keys())
-    mdf.close()
     return rel_types
 
 
 @cached
+def peer_relation_id():
+    '''Get the peers relation id if a peers relation has been joined, else None.'''
+    md = metadata()
+    section = md.get('peers')
+    if section:
+        for key in section:
+            relids = relation_ids(key)
+            if relids:
+                return relids[0]
+    return None
+
+
+@cached
+def relation_to_interface(relation_name):
+    """
+    Given the name of a relation, return the interface that relation uses.
+
+    :returns: The interface name, or ``None``.
+    """
+    return relation_to_role_and_interface(relation_name)[1]
+
+
+@cached
+def relation_to_role_and_interface(relation_name):
+    """
+    Given the name of a relation, return the role and the name of the interface
+    that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
+
+    :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
+    """
+    _metadata = metadata()
+    for role in ('provides', 'requires', 'peers'):
+        interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
+        if interface:
+            return role, interface
+    return None, None
+
+
+@cached
+def role_and_interface_to_relations(role, interface_name):
+    """
+    Given a role and interface name, return a list of relation names for the
+    current charm that use that interface under that role (where role is one
+    of ``provides``, ``requires``, or ``peers``).
+
+    :returns: A list of relation names.
+    """
+    _metadata = metadata()
+    results = []
+    for relation_name, relation in _metadata.get(role, {}).items():
+        if relation['interface'] == interface_name:
+            results.append(relation_name)
+    return results
+
+
+@cached
+def interface_to_relations(interface_name):
+    """
+    Given an interface, return a list of relation names for the current
+    charm that use that interface.
+
+    :returns: A list of relation names.
+    """
+    results = []
+    for role in ('provides', 'requires', 'peers'):
+        results.extend(role_and_interface_to_relations(role, interface_name))
+    return results
+
+
+@cached
+def charm_name():
+    """Get the name of the current charm as is specified on metadata.yaml"""
+    return metadata().get('name')
+
+
+@cached
 def relations():
     """Get a nested dictionary of relation data for all related units"""
     rels = {}
@@ -325,21 +653,72 @@ def close_port(port, protocol="TCP"):
     subprocess.check_call(_args)
 
 
+def open_ports(start, end, protocol="TCP"):
+    """Opens a range of service network ports"""
+    _args = ['open-port']
+    _args.append('{}-{}/{}'.format(start, end, protocol))
+    subprocess.check_call(_args)
+
+
+def close_ports(start, end, protocol="TCP"):
+    """Close a range of service network ports"""
+    _args = ['close-port']
+    _args.append('{}-{}/{}'.format(start, end, protocol))
+    subprocess.check_call(_args)
+
+
 @cached
 def unit_get(attribute):
     """Get the unit ID for the remote unit"""
     _args = ['unit-get', '--format=json', attribute]
     try:
-        return json.loads(subprocess.check_output(_args))
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
     except ValueError:
         return None
 
 
+def unit_public_ip():
+    """Get this unit's public IP address"""
+    return unit_get('public-address')
+
+
 def unit_private_ip():
     """Get this unit's private IP address"""
     return unit_get('private-address')
 
 
+@cached
+def storage_get(attribute=None, storage_id=None):
+    """Get storage attributes"""
+    _args = ['storage-get', '--format=json']
+    if storage_id:
+        _args.extend(('-s', storage_id))
+    if attribute:
+        _args.append(attribute)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+
+
+@cached
+def storage_list(storage_name=None):
+    """List the storage IDs for the unit"""
+    _args = ['storage-list', '--format=json']
+    if storage_name:
+        _args.append(storage_name)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+    except OSError as e:
+        import errno
+        if e.errno == errno.ENOENT:
+            # storage-list does not exist
+            return []
+        raise
+
+
 class UnregisteredHookError(Exception):
     """Raised when an undefined hook is called"""
     pass
@@ -348,37 +727,50 @@ class UnregisteredHookError(Exception):
 class Hooks(object):
     """A convenient handler for hook functions.
 
-    Example:
+    Example::
+
         hooks = Hooks()
 
         # register a hook, taking its name from the function name
         @hooks.hook()
         def install():
-            ...
+            pass  # your code here
 
         # register a hook, providing a custom hook name
         @hooks.hook("config-changed")
         def config_changed():
-            ...
+            pass  # your code here
 
         if __name__ == "__main__":
             # execute a hook based on the name the program is called by
             hooks.execute(sys.argv)
     """
 
-    def __init__(self):
+    def __init__(self, config_save=None):
         super(Hooks, self).__init__()
         self._hooks = {}
 
+        # For unknown reasons, we allow the Hooks constructor to override
+        # config().implicit_save.
+        if config_save is not None:
+            config().implicit_save = config_save
+
     def register(self, name, function):
         """Register a hook"""
         self._hooks[name] = function
 
     def execute(self, args):
         """Execute a registered hook based on args[0]"""
+        _run_atstart()
         hook_name = os.path.basename(args[0])
         if hook_name in self._hooks:
-            self._hooks[hook_name]()
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
         else:
             raise UnregisteredHookError(hook_name)
 
@@ -398,4 +790,319 @@ class Hooks(object):
 
 def charm_dir():
     """Return the root directory of the current charm"""
+    d = os.environ.get('JUJU_CHARM_DIR')
+    if d is not None:
+        return d
     return os.environ.get('CHARM_DIR')
+
+
+@cached
+def action_get(key=None):
+    """Gets the value of an action parameter, or all key/value param pairs"""
+    cmd = ['action-get']
+    if key is not None:
+        cmd.append(key)
+    cmd.append('--format=json')
+    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+    return action_data
+
+
+def action_set(values):
+    """Sets the values to be returned after the action finishes"""
+    cmd = ['action-set']
+    for k, v in list(values.items()):
+        cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+def action_fail(message):
+    """Sets the action status to failed and sets the error message.
+
+    The results set by action_set are preserved."""
+    subprocess.check_call(['action-fail', message])
+
+
+def action_name():
+    """Get the name of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_NAME')
+
+
+def action_uuid():
+    """Get the UUID of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_UUID')
+
+
+def action_tag():
+    """Get the tag for the currently executing action."""
+    return os.environ.get('JUJU_ACTION_TAG')
+
+
+def status_set(workload_state, message):
+    """Set the workload state with a message
+
+    Use status-set to set the workload state with a message which is visible
+    to the user via juju status. If the status-set command is not found then
+    assume this is juju < 1.23 and juju-log the message instead.
+
+    workload_state -- valid juju workload state.
+    message        -- status update message
+    """
+    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
+    if workload_state not in valid_states:
+        raise ValueError(
+            '{!r} is not a valid workload state'.format(workload_state)
+        )
+    cmd = ['status-set', workload_state, message]
+    try:
+        ret = subprocess.call(cmd)
+        if ret == 0:
+            return
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'status-set failed: {} {}'.format(workload_state,
+                                                    message)
+    log(log_message, level='INFO')
+
+
+def status_get():
+    """Retrieve the previously set juju workload state and message
+
+    If the status-get command is not found then assume this is juju < 1.23 and
+    return 'unknown', ""
+
+    """
+    cmd = ['status-get', "--format=json", "--include-data"]
+    try:
+        raw_status = subprocess.check_output(cmd)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return ('unknown', "")
+        else:
+            raise
+    else:
+        status = json.loads(raw_status.decode("UTF-8"))
+        return (status["status"], status["message"])
+
+
+def translate_exc(from_exc, to_exc):
+    def inner_translate_exc1(f):
+        @wraps(f)
+        def inner_translate_exc2(*args, **kwargs):
+            try:
+                return f(*args, **kwargs)
+            except from_exc:
+                raise to_exc
+
+        return inner_translate_exc2
+
+    return inner_translate_exc1
+
+
+def application_version_set(version):
+    """Charm authors may trigger this command from any hook to output what
+    version of the application is running. This could be a package version,
+    for instance postgres version 9.5. It could also be a build number or
+    version control revision identifier, for instance git sha 6fb7ba68. """
+
+    cmd = ['application-version-set']
+    cmd.append(version)
+    try:
+        subprocess.check_call(cmd)
+    except OSError:
+        log("Application Version: {}".format(version))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+    """Does the current unit hold the juju leadership
+
+    Uses juju to determine whether the current unit is the leader of its peers
+    """
+    cmd = ['is-leader', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+    """Juju leader get value(s)"""
+    cmd = ['leader-get', '--format=json'] + [attribute or '-']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_set(settings=None, **kwargs):
+    """Juju leader set value(s)"""
+    # Don't log secrets.
+    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
+    cmd = ['leader-set']
+    settings = settings or {}
+    settings.update(kwargs)
+    for k, v in settings.items():
+        if v is None:
+            cmd.append('{}='.format(k))
+        else:
+            cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+    """ is used while a hook is running to let Juju know that a
+        payload has been started."""
+    cmd = ['payload-register']
+    for x in [ptype, klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+    """ is used while a hook is running to let Juju know
+    that a payload has been manually stopped. The <class> and <id> provided
+    must match a payload that has been previously registered with juju using
+    payload-register."""
+    cmd = ['payload-unregister']
+    for x in [klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+    """is used to update the current status of a registered payload.
+    The <class> and <id> provided must match a payload that has been previously
+    registered with juju using payload-register. The <status> must be one of the
+    following: starting, started, stopping, stopped"""
+    cmd = ['payload-status-set']
+    for x in [klass, pid, status]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """used to fetch the resource path of the given name.
+
+    <name> must match a name of defined resource in metadata.yaml
+
+    returns either a path or False if resource not available
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
+@cached
+def juju_version():
+    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+    :return: string. The primary IP address for the named binding
+    :raise: NotImplementedError if run on Juju < 2.0
+    '''
+    cmd = ['network-get', '--primary-address', binding]
+    return subprocess.check_output(cmd).decode('UTF-8').strip()
+
+
+def add_metric(*args, **kwargs):
+    """Add metric values. Values may be expressed with keyword arguments. For
+    metric names containing dashes, these may be expressed as one or more
+    'key=value' positional arguments. May only be called from the collect-metrics
+    hook."""
+    _args = ['add-metric']
+    _kvpairs = []
+    _kvpairs.extend(args)
+    _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
+    _args.extend(sorted(_kvpairs))
+    try:
+        subprocess.check_call(_args)
+        return
+    except EnvironmentError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
+    log(log_message, level='INFO')
+
+
+def meter_status():
+    """Get the meter status, if running in the meter-status-changed hook."""
+    return os.environ.get('JUJU_METER_STATUS')
+
+
+def meter_info():
+    """Get the meter status information, if running in the meter-status-changed
+    hook."""
+    return os.environ.get('JUJU_METER_INFO')
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index cfd2684..5656e2f 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -1,3 +1,17 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 """Tools for working with the host system"""
 # Copyright 2012 Canonical Ltd.
 #
@@ -6,68 +20,332 @@
 #  Matthew Wedgwood <matthew.wedgwood@xxxxxxxxxxxxx>
 
 import os
+import re
 import pwd
+import glob
 import grp
 import random
 import string
 import subprocess
 import hashlib
+import functools
+import itertools
+import six
 
+from contextlib import contextmanager
 from collections import OrderedDict
+from .hookenv import log, DEBUG
+from .fstab import Fstab
+from charmhelpers.osplatform import get_platform
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.host_factory.ubuntu import (
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+        CompareHostReleases,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.host_factory.centos import (
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+        CompareHostReleases,
+    )  # flake8: noqa -- ignore F401 for this import
+
+UPDATEDB_PATH = '/etc/updatedb.conf'
+
+def service_start(service_name, **kwargs):
+    """Start a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be reloaded. The follow-
+    ing example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
 
-from hookenv import log
 
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be reloaded. The follow-
+    ing example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
 
-def service_start(service_name):
-    """Start a system service"""
-    return service('start', service_name)
 
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
 
-def service_stop(service_name):
-    """Stop a system service"""
-    return service('stop', service_name)
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
 
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The follow-
+    ing example restarts the ceph-osd service for instance id=4:
 
-def service_restart(service_name):
-    """Restart a system service"""
+    service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
     return service('restart', service_name)
 
 
-def service_reload(service_name, restart_on_failure=False):
-    """Reload a system service, optionally falling back to restart if reload fails"""
-    service_result = service('reload', service_name)
+def service_reload(service_name, restart_on_failure=False, **kwargs):
+    """Reload a system service, optionally falling back to restart if
+    reload fails.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be reloaded. The follow-
+    ing example restarts the ceph-osd service for instance id=4:
+
+    service_reload('ceph-osd', id=4)
+
+    :param service_name: the name of the service to reload
+    :param restart_on_failure: boolean indicating whether to fallback to a
+                               restart if the reload fails.
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the  init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    service_result = service('reload', service_name, **kwargs)
     if not service_result and restart_on_failure:
-        service_result = service('restart', service_name)
+        service_result = service('restart', service_name, **kwargs)
     return service_result
 
 
-def service(action, service_name):
-    """Control a system service"""
-    cmd = ['service', service_name, action]
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
+                  **kwargs):
+    """Pause a system service.
+
+    Stop it, and prevent it from starting again at boot.
+
+    :param service_name: the name of the service to pause
+    :param init_dir: path to the upstart init directory
+    :param initd_dir: path to the sysv init directory
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems which do not support
+                     key=value arguments via the commandline.
+    """
+    stopped = True
+    if service_running(service_name, **kwargs):
+        stopped = service_stop(service_name, **kwargs)
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd():
+        service('disable', service_name)
+        service('mask', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        with open(override_path, 'w') as fh:
+            fh.write("manual\n")
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "disable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+    return stopped
+
+
+def service_resume(service_name, init_dir="/etc/init",
+                   initd_dir="/etc/init.d", **kwargs):
+    """Resume a system service.
+
+    Reenable starting again at boot. Start the service.
+
+    :param service_name: the name of the service to resume
+    :param init_dir: the path to the init dir
+    :param initd dir: the path to the initd dir
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd():
+        service('unmask', service_name)
+        service('enable', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        if os.path.exists(override_path):
+            os.unlink(override_path)
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "enable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+    started = service_running(service_name, **kwargs)
+
+    if not started:
+        started = service_start(service_name, **kwargs)
+    return started
+
+
+def service(action, service_name, **kwargs):
+    """Control a system service.
+
+    :param action: the action to take on the service
+    :param service_name: the name of the service to perform the action on
+    :param **kwargs: additional params to be passed to the service command in
+                    the form of key=value.
+    """
+    if init_is_systemd():
+        cmd = ['systemctl', action, service_name]
+    else:
+        cmd = ['service', service_name, action]
+        for key, value in six.iteritems(kwargs):
+            parameter = '%s=%s' % (key, value)
+            cmd.append(parameter)
     return subprocess.call(cmd) == 0
 
 
-def service_running(service):
-    """Determine whether a system service is running"""
-    try:
-        output = subprocess.check_output(['service', service, 'status'])
-    except subprocess.CalledProcessError:
-        return False
+_UPSTART_CONF = "/etc/init/{}.conf"
+_INIT_D_CONF = "/etc/init.d/{}"
+
+
+def service_running(service_name, **kwargs):
+    """Determine whether a system service is running.
+
+    :param service_name: the name of the service
+    :param **kwargs: additional args to pass to the service command. This is
+                     used to pass additional key=value arguments to the
+                     service command line for managing specific instance
+                     units (e.g. service ceph-osd status id=2). The kwargs
+                     are ignored in systemd services.
+    """
+    if init_is_systemd():
+        return service('is-active', service_name)
     else:
-        if ("start/running" in output or "is running" in output):
-            return True
-        else:
-            return False
+        if os.path.exists(_UPSTART_CONF.format(service_name)):
+            try:
+                cmd = ['status', service_name]
+                for key, value in six.iteritems(kwargs):
+                    parameter = '%s=%s' % (key, value)
+                    cmd.append(parameter)
+                output = subprocess.check_output(cmd,
+                    stderr=subprocess.STDOUT).decode('UTF-8')
+            except subprocess.CalledProcessError:
+                return False
+            else:
+                # This works for upstart scripts where the 'service' command
+                # returns a consistent string to represent running
+                # 'start/running'
+                if ("start/running" in output or
+                        "is running" in output or
+                        "up and running" in output):
+                    return True
+        elif os.path.exists(_INIT_D_CONF.format(service_name)):
+            # Check System V scripts init script return codes
+            return service('status', service_name)
+        return False
+
 
+SYSTEMD_SYSTEM = '/run/systemd/system'
 
-def adduser(username, password=None, shell='/bin/bash', system_user=False):
-    """Add a user to the system"""
+
+def init_is_systemd():
+    """Return True if the host system uses systemd, False otherwise."""
+    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
+        return False
+    return os.path.isdir(SYSTEMD_SYSTEM)
+
+
+def adduser(username, password=None, shell='/bin/bash',
+            system_user=False, primary_group=None,
+            secondary_groups=None, uid=None, home_dir=None):
+    """Add a user to the system.
+
+    Will log but otherwise succeed if the user already exists.
+
+    :param str username: Username to create
+    :param str password: Password for user; if ``None``, create a system user
+    :param str shell: The default shell for the user
+    :param bool system_user: Whether to create a login or system user
+    :param str primary_group: Primary group for user; defaults to username
+    :param list secondary_groups: Optional list of additional groups
+    :param int uid: UID for user being created
+    :param str home_dir: Home directory for user
+
+    :returns: The password database entry struct, as returned by `pwd.getpwnam`
+    """
     try:
         user_info = pwd.getpwnam(username)
         log('user {0} already exists!'.format(username))
+        if uid:
+            user_info = pwd.getpwuid(int(uid))
+            log('user with uid {0} already exists!'.format(uid))
     except KeyError:
         log('creating user {0}'.format(username))
         cmd = ['useradd']
+        if uid:
+            cmd.extend(['--uid', str(uid)])
+        if home_dir:
+            cmd.extend(['--home', str(home_dir)])
         if system_user or password is None:
             cmd.append('--system')
         else:
@@ -76,32 +354,104 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
                 '--shell', shell,
                 '--password', password,
             ])
+        if not primary_group:
+            try:
+                grp.getgrnam(username)
+                primary_group = username  # avoid "group exists" error
+            except KeyError:
+                pass
+        if primary_group:
+            cmd.extend(['-g', primary_group])
+        if secondary_groups:
+            cmd.extend(['-G', ','.join(secondary_groups)])
         cmd.append(username)
         subprocess.check_call(cmd)
         user_info = pwd.getpwnam(username)
     return user_info
 
 
+def user_exists(username):
+    """Check if a user exists"""
+    try:
+        pwd.getpwnam(username)
+        user_exists = True
+    except KeyError:
+        user_exists = False
+    return user_exists
+
+
+def uid_exists(uid):
+    """Check if a uid exists"""
+    try:
+        pwd.getpwuid(uid)
+        uid_exists = True
+    except KeyError:
+        uid_exists = False
+    return uid_exists
+
+
+def group_exists(groupname):
+    """Check if a group exists"""
+    try:
+        grp.getgrnam(groupname)
+        group_exists = True
+    except KeyError:
+        group_exists = False
+    return group_exists
+
+
+def gid_exists(gid):
+    """Check if a gid exists"""
+    try:
+        grp.getgrgid(gid)
+        gid_exists = True
+    except KeyError:
+        gid_exists = False
+    return gid_exists
+
+
+def add_group(group_name, system_group=False, gid=None):
+    """Add a group to the system
+
+    Will log but otherwise succeed if the group already exists.
+
+    :param str group_name: group to create
+    :param bool system_group: Create system group
+    :param int gid: GID for user being created
+
+    :returns: The password database entry struct, as returned by `grp.getgrnam`
+    """
+    try:
+        group_info = grp.getgrnam(group_name)
+        log('group {0} already exists!'.format(group_name))
+        if gid:
+            group_info = grp.getgrgid(gid)
+            log('group with gid {0} already exists!'.format(gid))
+    except KeyError:
+        log('creating group {0}'.format(group_name))
+        add_new_group(group_name, system_group, gid)
+        group_info = grp.getgrnam(group_name)
+    return group_info
+
+
 def add_user_to_group(username, group):
     """Add a user to a group"""
-    cmd = [
-        'gpasswd', '-a',
-        username,
-        group
-    ]
+    cmd = ['gpasswd', '-a', username, group]
     log("Adding user {} to group {}".format(username, group))
     subprocess.check_call(cmd)
 
 
-def rsync(from_path, to_path, flags='-r', options=None):
+def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
     """Replicate the contents of a path"""
     options = options or ['--delete', '--executability']
     cmd = ['/usr/bin/rsync', flags]
+    if timeout:
+        cmd = ['timeout', str(timeout)] + cmd
     cmd.extend(options)
     cmd.append(from_path)
     cmd.append(to_path)
     log(" ".join(cmd))
-    return subprocess.check_output(cmd).strip()
+    return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
 
 
 def symlink(source, destination):
@@ -116,34 +466,71 @@ def symlink(source, destination):
     subprocess.check_call(cmd)
 
 
-def mkdir(path, owner='root', group='root', perms=0555, force=False):
+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
     """Create a directory"""
     log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                           perms))
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
     realpath = os.path.abspath(path)
-    if os.path.exists(realpath):
-        if force and not os.path.isdir(realpath):
+    path_exists = os.path.exists(realpath)
+    if path_exists and force:
+        if not os.path.isdir(realpath):
             log("Removing non-directory file {} prior to mkdir()".format(path))
             os.unlink(realpath)
-    else:
+            os.makedirs(realpath, perms)
+    elif not path_exists:
         os.makedirs(realpath, perms)
     os.chown(realpath, uid, gid)
+    os.chmod(realpath, perms)
 
 
-def write_file(path, content, owner='root', group='root', perms=0444):
-    """Create or overwrite a file with the contents of a string"""
-    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
+def write_file(path, content, owner='root', group='root', perms=0o444):
+    """Create or overwrite a file with the contents of a byte string."""
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
-    with open(path, 'w') as target:
-        os.fchown(target.fileno(), uid, gid)
-        os.fchmod(target.fileno(), perms)
-        target.write(content)
-
-
-def mount(device, mountpoint, options=None, persist=False):
+    # lets see if we can grab the file and compare the context, to avoid doing
+    # a write.
+    existing_content = None
+    existing_uid, existing_gid = None, None
+    try:
+        with open(path, 'rb') as target:
+            existing_content = target.read()
+        stat = os.stat(path)
+        existing_uid, existing_gid = stat.st_uid, stat.st_gid
+    except:
+        pass
+    if content != existing_content:
+        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+            level=DEBUG)
+        with open(path, 'wb') as target:
+            os.fchown(target.fileno(), uid, gid)
+            os.fchmod(target.fileno(), perms)
+            target.write(content)
+        return
+    # the contents were the same, but we might still need to change the
+    # ownership.
+    if existing_uid != uid:
+        log("Changing uid on already existing content: {} -> {}"
+            .format(existing_uid, uid), level=DEBUG)
+        os.chown(path, uid, -1)
+    if existing_gid != gid:
+        log("Changing gid on already existing content: {} -> {}"
+            .format(existing_gid, gid), level=DEBUG)
+        os.chown(path, -1, gid)
+
+
+def fstab_remove(mp):
+    """Remove the given mountpoint entry from /etc/fstab"""
+    return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+    """Adds the given device entry to the /etc/fstab file"""
+    return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
     """Mount a filesystem at a particular mountpoint"""
     cmd_args = ['mount']
     if options is not None:
@@ -151,12 +538,12 @@ def mount(device, mountpoint, options=None, persist=False):
     cmd_args.extend([device, mountpoint])
     try:
         subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
         return False
+
     if persist:
-        # TODO: update fstab
-        pass
+        return fstab_add(device, mountpoint, filesystem, options=options)
     return True
 
 
@@ -165,12 +552,12 @@ def umount(mountpoint, persist=False):
     cmd_args = ['umount', mountpoint]
     try:
         subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         log('Error unmounting {}\n{}'.format(mountpoint, e.output))
         return False
+
     if persist:
-        # TODO: update fstab
-        pass
+        return fstab_remove(mountpoint)
     return True
 
 
@@ -183,102 +570,240 @@ def mounts():
     return system_mounts
 
 
-def file_hash(path):
-    """Generate a md5 hash of the contents of 'path' or None if not found """
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
     if os.path.exists(path):
-        h = hashlib.md5()
-        with open(path, 'r') as source:
-            h.update(source.read())  # IGNORE:E1101 - it does have update
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
         return h.hexdigest()
     else:
         return None
 
 
-def restart_on_change(restart_map, stopstart=False):
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A class derived from Value error to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
     """Restart services based on configuration files changing
 
-    This function is used a decorator, for example
+    This function is used a decorator, for example::
 
         @restart_on_change({
             '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
             })
-        def ceph_client_changed():
-            ...
+        def config_changed():
+            pass  # your code here
 
     In this example, the cinder-api and cinder-volume services
     would be restarted if /etc/ceph/ceph.conf is changed by the
-    ceph_client_changed function.
+    ceph_client_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]
+    @param stopstart: DEFAULT false; whether to stop, start OR restart
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
     """
     def wrap(f):
-        def wrapped_f(*args):
-            checksums = {}
-            for path in restart_map:
-                checksums[path] = file_hash(path)
-            f(*args)
-            restarts = []
-            for path in restart_map:
-                if checksums[path] != file_hash(path):
-                    restarts += restart_map[path]
-            services_list = list(OrderedDict.fromkeys(restarts))
-            if not stopstart:
-                for service_name in services_list:
-                    service('restart', service_name)
-            else:
-                for action in ['stop', 'start']:
-                    for service_name in services_list:
-                        service(action, service_name)
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
         return wrapped_f
     return wrap
 
 
-def lsb_release():
-    """Return /etc/lsb-release in a dict"""
-    d = {}
-    with open('/etc/lsb-release', 'r') as lsb:
-        for l in lsb:
-            k, v = l.split('=')
-            d[k.strip()] = v.strip()
-    return d
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
 
 
 def pwgen(length=None):
     """Generate a random pasword."""
     if length is None:
+        # A random length is ok to use a weak PRNG
         length = random.choice(range(35, 45))
     alphanumeric_chars = [
-        l for l in (string.letters + string.digits)
+        l for l in (string.ascii_letters + string.digits)
         if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
     random_chars = [
-        random.choice(alphanumeric_chars) for _ in range(length)]
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
     return(''.join(random_chars))
 
 
-def list_nics(nic_type):
-    '''Return a list of nics of given type(s)'''
-    if isinstance(nic_type, basestring):
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
+
+    NOTE: the provided interface is expected to be physical
+    """
+    if interface:
+        iface_path = '/sys/class/net/%s' % (interface)
+        if os.path.exists(iface_path):
+            if '/virtual/' in os.path.realpath(iface_path):
+                return None
+
+            master = os.path.join(iface_path, 'master')
+            if os.path.exists(master):
+                master = os.path.realpath(master)
+                # make sure it is a bond master
+                if os.path.exists(os.path.join(master, 'bonding')):
+                    return os.path.basename(master)
+
+    return None
+
+
+def list_nics(nic_type=None):
+    """Return a list of nics of given type(s)"""
+    if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
         int_types = nic_type
+
     interfaces = []
-    for int_type in int_types:
-        cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-        ip_output = subprocess.check_output(cmd).split('\n')
-        ip_output = (line for line in ip_output if line)
+    if nic_type:
+        for int_type in int_types:
+            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
+            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = ip_output.split('\n')
+            ip_output = (line for line in ip_output if line)
+            for line in ip_output:
+                if line.split()[1].startswith(int_type):
+                    matched = re.search('.*: (' + int_type +
+                                        r'[0-9]+\.[0-9]+)@.*', line)
+                    if matched:
+                        iface = matched.groups()[0]
+                    else:
+                        iface = line.split()[1].replace(":", "")
+
+                    if iface not in interfaces:
+                        interfaces.append(iface)
+    else:
+        cmd = ['ip', 'a']
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = (line.strip() for line in ip_output if line)
+
+        key = re.compile('^[0-9]+:\s+(.+):')
         for line in ip_output:
-            if line.split()[1].startswith(int_type):
-                interfaces.append(line.split()[1].replace(":", ""))
+            matched = re.search(key, line)
+            if matched:
+                iface = matched.group(1)
+                iface = iface.partition("@")[0]
+                if iface not in interfaces:
+                    interfaces.append(iface)
+
     return interfaces
 
 
 def set_nic_mtu(nic, mtu):
-    '''Set MTU on a network interface'''
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
     cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
     subprocess.check_call(cmd)
 
 
 def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
     cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).split('\n')
+    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""
     for line in ip_output:
         words = line.split()
@@ -288,10 +813,136 @@ def get_nic_mtu(nic):
 
 
 def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd)
+    ip_output = subprocess.check_output(cmd).decode('UTF-8')
     hwaddr = ""
     words = ip_output.split()
     if 'link/ether' in words:
         hwaddr = words[words.index('link/ether') + 1]
     return hwaddr
+
+
+@contextmanager
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
+    cur = os.getcwd()
+    try:
+        yield os.chdir(directory)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+    """Recursively change user and group ownership of files and directories
+    in given path. Doesn't chown path itself by default, only its children.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    :param bool follow_links: Also follow and chown links if True
+    :param bool chowntopdir: Also chown path itself if True
+    """
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    if follow_links:
+        chown = os.chown
+    else:
+        chown = os.lchown
+
+    if chowntopdir:
+        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
+        if not broken_symlink:
+            chown(path, uid, gid)
+    for root, dirs, files in os.walk(path, followlinks=follow_links):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                chown(full, uid, gid)
+
+
+def lchownr(path, owner, group):
+    """Recursively change user and group ownership of files and directories
+    in a given path, not following symbolic links. See the documentation for
+    'os.lchown' for more information.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    """
+    chownr(path, owner, group, follow_links=False)
+
+
+def owner(path):
+    """Returns a tuple containing the username & groupname owning the path.
+
+    :param str path: the string path to retrieve the ownership
+    :return tuple(str, str): A (username, groupname) tuple containing the
+                             name of the user and group owning the path.
+    :raises OSError: if the specified path does not exist
+    """
+    stat = os.stat(path)
+    username = pwd.getpwuid(stat.st_uid)[0]
+    groupname = grp.getgrgid(stat.st_gid)[0]
+    return username, groupname
+
+
+def get_total_ram():
+    """The total amount of system RAM in bytes.
+
+    This is what is reported by the OS, and may be overcommitted when
+    there are multiple containers hosted on the same machine.
+    """
+    with open('/proc/meminfo', 'r') as f:
+        for line in f.readlines():
+            if line:
+                key, value, unit = line.split()
+                if key == 'MemTotal:':
+                    assert unit == 'kB', 'Unknown unit'
+                    return int(value) * 1024  # Classic, not KiB.
+        raise NotImplementedError()
+
+
+UPSTART_CONTAINER_TYPE = '/run/container_type'
+
+
+def is_container():
+    """Determine whether unit is running in a container
+
+    @return: boolean indicating if unit is in a container
+    """
+    if init_is_systemd():
+        # Detect using systemd-detect-virt
+        return subprocess.call(['systemd-detect-virt',
+                                '--container']) == 0
+    else:
+        # Detect using upstart container file marker
+        return os.path.exists(UPSTART_CONTAINER_TYPE)
+
+
+def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+    with open(updatedb_path, 'r+') as f_id:
+        updatedb_text = f_id.read()
+        output = updatedb(updatedb_text, path)
+        f_id.seek(0)
+        f_id.write(output)
+        f_id.truncate()
+
+
+def updatedb(updatedb_text, new_path):
+    lines = updatedb_text.split("\n")
+    for i, line in enumerate(lines):
+        if line.startswith("PRUNEPATHS="):
+            paths_line = line.split("=")[1].replace('"', '')
+            paths = paths_line.split(" ")
+            if new_path not in paths:
+                paths.append(new_path)
+                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
+    output = "\n".join(lines)
+    return output
diff --git a/hooks/charmhelpers/core/host_factory/__init__.py b/hooks/charmhelpers/core/host_factory/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hooks/charmhelpers/core/host_factory/__init__.py
diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py
new file mode 100644
index 0000000..7781a39
--- /dev/null
+++ b/hooks/charmhelpers/core/host_factory/centos.py
@@ -0,0 +1,72 @@
+import subprocess
+import yum
+import os
+
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Host releases.
+
+    Use in the form of
+
+    if CompareHostReleases(release) > 'trusty':
+        # do something with xenial
+    """
+
+    def __init__(self, item):
+        raise NotImplementedError(
+            "CompareHostReleases() is not implemented for CentOS")
+
+
+def service_available(service_name):
+    """Determine whether a system service is available."""
+    if os.path.isdir('/run/systemd/system'):
+        cmd = ['systemctl', 'is-enabled', service_name]
+    else:
+        cmd = ['service', service_name, 'is-enabled']
+    return subprocess.call(cmd) == 0
+
+
+def add_new_group(group_name, system_group=False, gid=None):
+    cmd = ['groupadd']
+    if gid:
+        cmd.extend(['--gid', str(gid)])
+    if system_group:
+        cmd.append('-r')
+    cmd.append(group_name)
+    subprocess.check_call(cmd)
+
+
+def lsb_release():
+    """Return /etc/os-release in a dict."""
+    d = {}
+    with open('/etc/os-release', 'r') as lsb:
+        for l in lsb:
+            s = l.split('=')
+            if len(s) != 2:
+                continue
+            d[s[0].strip()] = s[1].strip()
+    return d
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+    """Compare supplied revno with the revno of the installed package.
+
+    *  1 => Installed revno is greater than supplied arg
+    *  0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
+
+    This function imports YumBase function if the pkgcache argument
+    is None.
+    """
+    if not pkgcache:
+        y = yum.YumBase()
+        packages = y.doPackageLists()
+        pkgcache = {i.Name: i.version for i in packages['installed']}
+    pkg = pkgcache[package]
+    if pkg > revno:
+        return 1
+    if pkg < revno:
+        return -1
+    return 0
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
new file mode 100644
index 0000000..d8dc378
--- /dev/null
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -0,0 +1,89 @@
+import subprocess
+
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+UBUNTU_RELEASES = (
+    'lucid',
+    'maverick',
+    'natty',
+    'oneiric',
+    'precise',
+    'quantal',
+    'raring',
+    'saucy',
+    'trusty',
+    'utopic',
+    'vivid',
+    'wily',
+    'xenial',
+    'yakkety',
+    'zesty',
+    'artful',
+)
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Ubuntu releases.
+
+    Use in the form of
+
+    if CompareHostReleases(release) > 'trusty':
+        # do something with xenial
+    """
+    _list = UBUNTU_RELEASES
+
+
+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(
+            ['service', service_name, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
+    except subprocess.CalledProcessError as e:
+        return b'unrecognized service' not in e.output
+    else:
+        return True
+
+
+def add_new_group(group_name, system_group=False, gid=None):
+    cmd = ['addgroup']
+    if gid:
+        cmd.extend(['--gid', str(gid)])
+    if system_group:
+        cmd.append('--system')
+    else:
+        cmd.extend([
+            '--group',
+        ])
+    cmd.append(group_name)
+    subprocess.check_call(cmd)
+
+
+def lsb_release():
+    """Return /etc/lsb-release in a dict"""
+    d = {}
+    with open('/etc/lsb-release', 'r') as lsb:
+        for l in lsb:
+            k, v = l.split('=', 1)
+            d[k.strip()] = v.strip()
+    return d
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+    """Compare supplied revno with the revno of the installed package.
+
+    *  1 => Installed revno is greater than supplied arg
+    *  0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
+
+    This function imports apt_cache function from charmhelpers.fetch if
+    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
+    you call this function, or pass an apt_pkg.Cache() instance.
+    """
+    import apt_pkg
+    if not pkgcache:
+        from charmhelpers.fetch import apt_cache
+        pkgcache = apt_cache()
+    pkg = pkgcache[package]
+    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py
new file mode 100644
index 0000000..54b5b5e
--- /dev/null
+++ b/hooks/charmhelpers/core/hugepage.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+from charmhelpers.core import fstab
+from charmhelpers.core import sysctl
+from charmhelpers.core.host import (
+    add_group,
+    add_user_to_group,
+    fstab_mount,
+    mkdir,
+)
+from charmhelpers.core.strutils import bytes_from_string
+from subprocess import check_output
+
+
+def hugepage_support(user, group='hugetlb', nr_hugepages=256,
+                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
+                     pagesize='2MB', mount=True, set_shmmax=False):
+    """Enable hugepages on system.
+
+    Args:
+    user (str)  -- Username to allow access to hugepages to
+    group (str) -- Group name to own hugepages
+    nr_hugepages (int) -- Number of pages to reserve
+    max_map_count (int) -- Number of Virtual Memory Areas a process can own
+    mnt_point (str) -- Directory to mount hugepages on
+    pagesize (str) -- Size of hugepages
+    mount (bool) -- Whether to Mount hugepages
+    """
+    group_info = add_group(group)
+    gid = group_info.gr_gid
+    add_user_to_group(user, group)
+    if max_map_count < 2 * nr_hugepages:
+        max_map_count = 2 * nr_hugepages
+    sysctl_settings = {
+        'vm.nr_hugepages': nr_hugepages,
+        'vm.max_map_count': max_map_count,
+        'vm.hugetlb_shm_group': gid,
+    }
+    if set_shmmax:
+        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
+        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
+        if shmmax_minsize > shmmax_current:
+            sysctl_settings['kernel.shmmax'] = shmmax_minsize
+    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
+    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
+    lfstab = fstab.Fstab()
+    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
+    if fstab_entry:
+        lfstab.remove_entry(fstab_entry)
+    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
+                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
+    lfstab.add_entry(entry)
+    if mount:
+        fstab_mount(mnt_point)
diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py
new file mode 100644
index 0000000..2d40452
--- /dev/null
+++ b/hooks/charmhelpers/core/kernel.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+from charmhelpers.osplatform import get_platform
+from charmhelpers.core.hookenv import (
+    log,
+    INFO
+)
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@xxxxxxxxxxxxx>"
+
+
+def modprobe(module, persist=True):
+    """Load a kernel module and configure for auto-load on reboot."""
+    cmd = ['modprobe', module]
+
+    log('Loading kernel module %s' % module, level=INFO)
+
+    subprocess.check_call(cmd)
+    if persist:
+        persistent_modprobe(module)
+
+
+def rmmod(module, force=False):
+    """Remove a module from the linux kernel"""
+    cmd = ['rmmod']
+    if force:
+        cmd.append('-f')
+    cmd.append(module)
+    log('Removing kernel module %s' % module, level=INFO)
+    return subprocess.check_call(cmd)
+
+
+def lsmod():
+    """Shows what kernel modules are currently loaded"""
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)
+
+
+def is_module_loaded(module):
+    """Checks if a kernel module is already loaded"""
+    matches = re.findall('^%s[ ]+' % re.escape(module), lsmod(), re.M)
+    return len(matches) > 0
diff --git a/hooks/charmhelpers/core/kernel_factory/__init__.py b/hooks/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hooks/charmhelpers/core/kernel_factory/__init__.py
diff --git a/hooks/charmhelpers/core/kernel_factory/centos.py b/hooks/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 0000000..1c402c1
--- /dev/null
+++ b/hooks/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    if not os.path.exists('/etc/rc.modules'):
+        open('/etc/rc.modules', 'a')
+        os.chmod('/etc/rc.modules', 0o111)
+    with open('/etc/rc.modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write('modprobe %s\n' % module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["dracut", "-f", version])
diff --git a/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/hooks/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 0000000..3de372f
--- /dev/null
+++ b/hooks/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    with open('/etc/modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write(module + "\n")
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py
new file mode 100644
index 0000000..61fd074
--- /dev/null
+++ b/hooks/charmhelpers/core/services/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base import *  # NOQA
+from .helpers import *  # NOQA
diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py
new file mode 100644
index 0000000..ca9dc99
--- /dev/null
+++ b/hooks/charmhelpers/core/services/base.py
@@ -0,0 +1,351 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+from inspect import getargspec
+from collections import Iterable, OrderedDict
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the list
+        of 'data_ready' and 'start' callbacks executed.  See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`.  Each callback will be called with the service name as the
+        only parameter.  After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete.  Each callback will be called with the service
+        name as the only parameter.  This defaults to
+        `[host.service_start, services.open_ports]`.
+
+        The 'stop' value should be either a single callback, or a list of
+        callbacks, to be called when stopping the service.  If the service is
+        being stopped because it no longer has all of its 'required_data', this
+        will be called after all of the 'data_lost' callbacks are complete.
+        Each callback will be called with the service name as the only parameter.
+        This defaults to `[services.close_ports, host.service_stop]`.
+
+        The 'ports' value should be a list of ports to manage.  The default
+        'start' handler will open the ports after the service is started,
+        and the default 'stop' handler will close the ports prior to stopping
+        the service.
+
+
+        Examples:
+
+        The following registers an Upstart service called bingod that depends on
+        a mongodb relation and which runs a custom `db_migrate` function prior to
+        restarting the service, and a Runit service called spadesd::
+
+            manager = services.ServiceManager([
+                {
+                    'service': 'bingod',
+                    'ports': [80, 443],
+                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
+                    'data_ready': [
+                        services.template(source='bingod.conf'),
+                        services.template(source='bingod.ini',
+                                          target='/etc/bingod.ini',
+                                          owner='bingo', perms=0400),
+                    ],
+                },
+                {
+                    'service': 'spadesd',
+                    'data_ready': services.template(source='spadesd_run.j2',
+                                                    target='/etc/sv/spadesd/run',
+                                                    perms=0555),
+                    'start': runit_start,
+                    'stop': runit_stop,
+                },
+            ])
+            manager.manage()
+        """
+        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+        self._ready = None
+        self.services = OrderedDict()
+        for service in services or []:
+            service_name = service['service']
+            self.services[service_name] = service
+
+    def manage(self):
+        """
+        Handle the current hook by doing The Right Thing with the registered services.
+        """
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()
+
+    def provide_data(self):
+        """
+        Set the relation data for each provider in the ``provided_data`` list.
+
+        A provider must have a `name` attribute, which indicates which relation
+        to set data on, and a `provide_data()` method, which returns a dict of
+        data to set.
+
+        The `provide_data()` method can optionally accept two parameters:
+
+          * ``remote_service`` The name of the remote service that the data will
+            be provided to.  The `provide_data()` method will be called once
+            for each connected service (not unit).  This allows the method to
+            tailor its data to the given service.
+          * ``service_ready`` Whether or not the service definition had all of
+            its requirements met, and thus the ``data_ready`` callbacks run.
+
+        Note that the ``provided_data`` methods are now called **after** the
+        ``data_ready`` callbacks are run.  This gives the ``data_ready`` callbacks
+        a chance to generate any data necessary for the providing to the remote
+        services.
+        """
+        for service_name, service in self.services.items():
+            service_ready = self.is_ready(service_name)
+            for provider in service.get('provided_data', []):
+                for relid in hookenv.relation_ids(provider.name):
+                    units = hookenv.related_units(relid)
+                    if not units:
+                        continue
+                    remote_service = units[0].split('/')[0]
+                    argspec = getargspec(provider.provide_data)
+                    if len(argspec.args) > 1:
+                        data = provider.provide_data(remote_service, service_ready)
+                    else:
+                        data = provider.provide_data()
+                    if data:
+                        hookenv.relation_set(relid, data)
+
+    def reconfigure_services(self, *service_names):
+        """
+        Update all files for one or more registered services, and,
+        if ready, optionally restart them.
+
+        If no service names are given, reconfigures all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            if self.is_ready(service_name):
+                self.fire_event('data_ready', service_name)
+                self.fire_event('start', service_name, default=[
+                    service_restart,
+                    manage_ports])
+                self.save_ready(service_name)
+            else:
+                if self.was_ready(service_name):
+                    self.fire_event('data_lost', service_name)
+                self.fire_event('stop', service_name, default=[
+                    manage_ports,
+                    service_stop])
+                self.save_lost(service_name)
+
+    def stop_services(self, *service_names):
+        """
+        Stop one or more registered services, by name.
+
+        If no service names are given, stops all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            self.fire_event('stop', service_name, default=[
+                manage_ports,
+                service_stop])
+
+    def get_service(self, service_name):
+        """
+        Given the name of a registered service, return its service definition.
+        """
+        service = self.services.get(service_name)
+        if not service:
+            raise KeyError('Service not registered: %s' % service_name)
+        return service
+
+    def fire_event(self, event_name, service_name, default=None):
+        """
+        Fire a data_ready, data_lost, start, or stop event on a given service.
+        """
+        service = self.get_service(service_name)
+        callbacks = service.get(event_name, default)
+        if not callbacks:
+            return
+        if not isinstance(callbacks, Iterable):
+            callbacks = [callbacks]
+        for callback in callbacks:
+            if isinstance(callback, ManagerCallback):
+                callback(self, service_name, event_name)
+            else:
+                callback(service_name)
+
+    def is_ready(self, service_name):
+        """
+        Determine if a registered service is ready, by checking its 'required_data'.
+
+        A 'required_data' item can be any mapping type, and is considered ready
+        if `bool(item)` evaluates as True.
+        """
+        service = self.get_service(service_name)
+        reqs = service.get('required_data', [])
+        return all(bool(req) for req in reqs)
+
+    def _load_ready_file(self):
+        if self._ready is not None:
+            return
+        if os.path.exists(self._ready_file):
+            with open(self._ready_file) as fp:
+                self._ready = set(json.load(fp))
+        else:
+            self._ready = set()
+
+    def _save_ready_file(self):
+        if self._ready is None:
+            return
+        with open(self._ready_file, 'w') as fp:
+            json.dump(list(self._ready), fp)
+
+    def save_ready(self, service_name):
+        """
+        Save an indicator that the given service is now data_ready.
+        """
+        self._load_ready_file()
+        self._ready.add(service_name)
+        self._save_ready_file()
+
+    def save_lost(self, service_name):
+        """
+        Save an indicator that the given service is no longer data_ready.
+        """
+        self._load_ready_file()
+        self._ready.discard(service_name)
+        self._save_ready_file()
+
+    def was_ready(self, service_name):
+        """
+        Determine if the given service was previously data_ready.
+        """
+        self._load_ready_file()
+        return service_name in self._ready
+
+
+class ManagerCallback(object):
+    """
+    Special case of a callback that takes the `ServiceManager` instance
+    in addition to the service name.
+
+    Subclasses should implement `__call__` which should accept three parameters:
+
+        * `manager`       The `ServiceManager` instance
+        * `service_name`  The name of the service it's being triggered for
+        * `event_name`    The name of the event that this callback is handling
+    """
+    def __call__(self, manager, service_name, event_name):
+        raise NotImplementedError()
+
+
+class PortManagerCallback(ManagerCallback):
+    """
+    Callback class that will open or close ports, for use as either
+    a start or stop action.
+    """
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        new_ports = service.get('ports', [])
+        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
+        if os.path.exists(port_file):
+            with open(port_file) as fp:
+                old_ports = fp.read().split(',')
+            for old_port in old_ports:
+                if bool(old_port):
+                    old_port = int(old_port)
+                    if old_port not in new_ports:
+                        hookenv.close_port(old_port)
+        with open(port_file, 'w') as fp:
+            fp.write(','.join(str(port) for port in new_ports))
+        for port in new_ports:
+            if event_name == 'start':
+                hookenv.open_port(port)
+            elif event_name == 'stop':
+                hookenv.close_port(port)
+
+
+def service_stop(service_name):
+    """
+    Wrapper around host.service_stop to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_running(service_name):
+        host.service_stop(service_name)
+
+
+def service_restart(service_name):
+    """
+    Wrapper around host.service_restart to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_available(service_name):
+        if host.service_running(service_name):
+            host.service_restart(service_name)
+        else:
+            host.service_start(service_name)
+
+
+# Convenience aliases
+open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py
new file mode 100644
index 0000000..3e6e30d
--- /dev/null
+++ b/hooks/charmhelpers/core/services/helpers.py
@@ -0,0 +1,290 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import yaml
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.core import templating
+
+from charmhelpers.core.services.base import ManagerCallback
+
+
+__all__ = ['RelationContext', 'TemplateCallback',
+           'render_template', 'template']
+
+
+class RelationContext(dict):
+    """
+    Base class for a context generator that gets relation data from juju.
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete.  The data for all interfaces matching
+    the `name` attribute that are complete will be used to populate the dictionary
+    values (see `get_data`, below).
+
+    The generated context will be namespaced under the relation :attr:`name`,
+    to prevent potential naming conflicts.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = None
+    interface = None
+
+    def __init__(self, name=None, additional_required_keys=None):
+        # Subclasses typically assign self.required_keys before calling this.
+        if not hasattr(self, 'required_keys'):
+            self.required_keys = []
+
+        if name is not None:
+            self.name = name
+        if additional_required_keys:
+            # NOTE(review): if a subclass defines required_keys as a *class*
+            # attribute, extend() mutates that shared list across instances —
+            # confirm subclasses set it per-instance (as the ones here do).
+            self.required_keys.extend(additional_required_keys)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    # Python 2 truthiness hook; aliased to keep py2/py3 behavior identical.
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        # NOTE(review): delegates straight to dict.__repr__ — the override
+        # appears redundant but is kept to match upstream charm-helpers.
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        # get_data() only stores complete unit data, so a non-empty list under
+        # self.name means at least one unit satisfied required_keys.
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`.  This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID.  Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                # Only units with all required_keys present are recorded.
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class MysqlRelation(RelationContext):
+    """
+    Relation context for the `mysql` interface.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = 'db'
+    interface = 'mysql'
+
+    def __init__(self, *args, **kwargs):
+        # Set per-instance before the base __init__ runs get_data(), so the
+        # hasattr() guard in RelationContext leaves this list untouched.
+        self.required_keys = ['host', 'user', 'password', 'database']
+        RelationContext.__init__(self, *args, **kwargs)
+
+
+class HttpRelation(RelationContext):
+    """
+    Relation context for the `http` interface.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = 'website'
+    interface = 'http'
+
+    def __init__(self, *args, **kwargs):
+        # Set per-instance before the base __init__ runs get_data().
+        self.required_keys = ['host', 'port']
+        RelationContext.__init__(self, *args, **kwargs)
+
+    def provide_data(self):
+        # Advertise this unit's private address on the conventional HTTP port.
+        return {
+            'host': hookenv.unit_get('private-address'),
+            'port': 80,
+        }
+
+
+class RequiredConfig(dict):
+    """
+    Data context that loads config options with one or more mandatory options.
+
+    Once the required options have been changed from their default values, all
+    config options will be available, namespaced under `config` to prevent
+    potential naming conflicts (for example, between a config option and a
+    relation property).
+
+    :param list *args: List of options that must be changed from their default values.
+    """
+
+    def __init__(self, *args):
+        self.required_options = args
+        self['config'] = hookenv.config()
+        # Load the option declarations (defaults) from the charm's config.yaml.
+        # NOTE(review): yaml.load without an explicit Loader uses the unsafe
+        # default; config.yaml is charm-local, but safe_load would be preferable.
+        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
+            self.config = yaml.load(fp).get('options', {})
+
+    def __bool__(self):
+        # True only when every required option exists and has been changed
+        # from its declared default to a non-empty value.
+        for option in self.required_options:
+            if option not in self['config']:
+                return False
+            current_value = self['config'][option]
+            default_value = self.config[option].get('default')
+            if current_value == default_value:
+                return False
+            # Treat None/'' as "unset" even when the default is also unset.
+            if current_value in (None, '') and default_value in (None, ''):
+                return False
+        return True
+
+    def __nonzero__(self):
+        # Python 2 truthiness hook.
+        return self.__bool__()
+
+
+class StoredContext(dict):
+    """
+    A data context that always returns the data that it was first created with.
+
+    This is useful to do a one-time generation of things like passwords, that
+    will thereafter use the same value that was originally generated, instead
+    of generating a new value each time it is run.
+    """
+    def __init__(self, file_name, config_data):
+        """
+        If the file exists, populate `self` with the data from the file.
+        Otherwise, populate with the given data and persist it to the file.
+        """
+        if os.path.exists(file_name):
+            self.update(self.read_context(file_name))
+        else:
+            self.store_context(file_name, config_data)
+            self.update(config_data)
+
+    def store_context(self, file_name, config_data):
+        # Relative paths are resolved against the charm directory.
+        if not os.path.isabs(file_name):
+            file_name = os.path.join(hookenv.charm_dir(), file_name)
+        with open(file_name, 'w') as file_stream:
+            # Tighten permissions before writing so potentially secret data
+            # (e.g. generated passwords) is never world-readable.
+            os.fchmod(file_stream.fileno(), 0o600)
+            yaml.dump(config_data, file_stream)
+
+    def read_context(self, file_name):
+        if not os.path.isabs(file_name):
+            file_name = os.path.join(hookenv.charm_dir(), file_name)
+        with open(file_name, 'r') as file_stream:
+            # NOTE(review): yaml.load without an explicit Loader uses the
+            # unsafe default; the file is charm-written, but safe_load would
+            # be preferable.
+            data = yaml.load(file_stream)
+            if not data:
+                raise OSError("%s is empty" % file_name)
+            return data
+
+
+class TemplateCallback(ManagerCallback):
+    """
+    Callback class that will render a Jinja2 template, for use as a ready
+    action.
+
+    :param str source: The template source file, relative to
+        `$CHARM_DIR/templates`
+
+    :param str target: The target to write the rendered template to (or None)
+    :param str owner: The owner of the rendered file
+    :param str group: The group of the rendered file
+    :param int perms: The permissions of the rendered file
+    :param partial on_change_action: functools partial to be executed when
+                                     rendered file changes
+    :param jinja2 loader template_loader: A jinja2 template loader
+
+    :return str: The rendered template
+    """
+    def __init__(self, source, target,
+                 owner='root', group='root', perms=0o444,
+                 on_change_action=None, template_loader=None):
+        self.source = source
+        self.target = target
+        self.owner = owner
+        self.group = group
+        self.perms = perms
+        self.on_change_action = on_change_action
+        self.template_loader = template_loader
+
+    def __call__(self, manager, service_name, event_name):
+        # Hash the target before rendering so we can tell afterwards whether
+        # the rendered content actually changed.
+        pre_checksum = ''
+        if self.on_change_action and os.path.isfile(self.target):
+            pre_checksum = host.file_hash(self.target)
+        service = manager.get_service(service_name)
+        # Flatten all required_data contexts into one namespace; they are also
+        # exposed under 'ctx' so templates can reference the merged mapping.
+        context = {'ctx': {}}
+        for ctx in service.get('required_data', []):
+            context.update(ctx)
+            context['ctx'].update(ctx)
+
+        result = templating.render(self.source, self.target, context,
+                                   self.owner, self.group, self.perms,
+                                   template_loader=self.template_loader)
+        if self.on_change_action:
+            # Only fire the action when the file content changed.
+            if pre_checksum == host.file_hash(self.target):
+                hookenv.log(
+                    'No change detected: {}'.format(self.target),
+                    hookenv.DEBUG)
+            else:
+                self.on_change_action()
+
+        return result
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py
new file mode 100644
index 0000000..685dabd
--- /dev/null
+++ b/hooks/charmhelpers/core/strutils.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+import re
+
+
+def bool_from_string(value):
+    """Interpret string value as boolean.
+
+    Returns True if value translates to True otherwise False.
+
+    :param value: a string (str/unicode); any other type raises ValueError
+    :raises ValueError: if value is not a string or not a recognised token
+    """
+    if isinstance(value, six.string_types):
+        # Normalise to text (unicode on py2, str on py3).
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+        raise ValueError(msg)
+
+    value = value.strip().lower()
+
+    if value in ['y', 'yes', 'true', 't', 'on']:
+        return True
+    elif value in ['n', 'no', 'false', 'f', 'off']:
+        return False
+
+    msg = "Unable to interpret string value '%s' as boolean" % (value)
+    raise ValueError(msg)
+
+
+def bytes_from_string(value):
+    """Interpret human readable string value as bytes.
+
+    Returns int
+
+    :param value: a string such as "10G" or "512MB"; a unit suffix is
+        mandatory — the regex below rejects a bare number like "1024".
+    :raises ValueError: for non-strings or unparseable values
+    """
+    # Map of unit suffix -> power of 1024.
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        # NOTE(review): message says "as boolean" — upstream copy-paste from
+        # bool_from_string; should read "as bytes".
+        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    # An unknown suffix (e.g. "X") raises KeyError here rather than ValueError.
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+
+
+class BasicStringComparator(object):
+    """Provides a class that will compare strings from an iterator type object.
+    Used to provide > and < comparisons on strings that may not necessarily be
+    alphanumerically ordered.  e.g. OpenStack or Ubuntu releases AFTER the
+    z-wrap.
+    """
+
+    # Subclasses must set this to the ordered sequence of valid strings.
+    _list = None
+
+    def __init__(self, item):
+        if self._list is None:
+            raise Exception("Must define the _list in the class definition!")
+        try:
+            self.index = self._list.index(item)
+        except Exception:
+            raise KeyError("Item '{}' is not in list '{}'"
+                           .format(item, self._list))
+
+    def __eq__(self, other):
+        # Comparing against an unknown string raises from _list.index();
+        # a non-string/non-instance operand fails the assert.
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index == self._list.index(other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __lt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index < self._list.index(other)
+
+    def __ge__(self, other):
+        return not self.__lt__(other)
+
+    def __gt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index > self._list.index(other)
+
+    def __le__(self, other):
+        return not self.__gt__(other)
+
+    def __str__(self):
+        """Always give back the item at the index so it can be used in
+        comparisons like:
+
+        s_mitaka = CompareOpenStack('mitaka')
+        s_newton = CompareOpenStack('newton')
+
+        assert s_newton > s_mitaka
+
+        @returns: <string>
+        """
+        return self._list[self.index]
diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py
new file mode 100644
index 0000000..6e413e3
--- /dev/null
+++ b/hooks/charmhelpers/core/sysctl.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from subprocess import check_call
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+)
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@xxxxxxxxxxxxx>'
+
+
+def create(sysctl_dict, sysctl_file):
+    """Creates a sysctl.conf file from a YAML associative array
+
+    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :type sysctl_dict: str
+    :param sysctl_file: path to the sysctl file to be saved
+    :type sysctl_file: str or unicode
+    :returns: None
+    """
+    try:
+        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+    except yaml.YAMLError:
+        # Best-effort: log and bail out rather than crash the hook on bad YAML.
+        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+            level=ERROR)
+        return
+
+    with open(sysctl_file, "w") as fd:
+        for key, value in sysctl_dict_parsed.items():
+            fd.write("{}={}\n".format(key, value))
+
+    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+        level=DEBUG)
+
+    # Apply immediately; check_call raises CalledProcessError on failure.
+    check_call(["sysctl", "-p", sysctl_file])
diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py
new file mode 100644
index 0000000..7b801a3
--- /dev/null
+++ b/hooks/charmhelpers/core/templating.py
@@ -0,0 +1,84 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+    """
+    Render a template.
+
+    The `source` path, if not absolute, is relative to the `templates_dir`.
+
+    The `target` path should be absolute.  It can also be `None`, in which
+    case no file will be written.
+
+    The context should be a dict containing the values to be replaced in the
+    template.
+
+    The `owner`, `group`, and `perms` options will be passed to `write_file`.
+
+    If omitted, `templates_dir` defaults to the `templates` folder in the charm.
+
+    The rendered template will be written to the file as well as being returned
+    as a string.
+
+    Note: Using this requires python-jinja2 or python3-jinja2; if it is not
+    installed, calling this will attempt to use charmhelpers.fetch.apt_install
+    to install it.
+    """
+    # Deferred import so charms without jinja2 preinstalled can still load
+    # this module; on ImportError we install the matching distro package.
+    try:
+        from jinja2 import FileSystemLoader, Environment, exceptions
+    except ImportError:
+        try:
+            from charmhelpers.fetch import apt_install
+        except ImportError:
+            hookenv.log('Could not import jinja2, and could not import '
+                        'charmhelpers.fetch to install it',
+                        level=hookenv.ERROR)
+            raise
+        if sys.version_info.major == 2:
+            apt_install('python-jinja2', fatal=True)
+        else:
+            apt_install('python3-jinja2', fatal=True)
+        from jinja2 import FileSystemLoader, Environment, exceptions
+
+    if template_loader:
+        # An explicit loader overrides templates_dir entirely.
+        template_env = Environment(loader=template_loader)
+    else:
+        if templates_dir is None:
+            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+        template_env = Environment(loader=FileSystemLoader(templates_dir))
+    try:
+        # NOTE(review): self-assignment is a no-op retained from upstream.
+        source = source
+        template = template_env.get_template(source)
+    except exceptions.TemplateNotFound as e:
+        hookenv.log('Could not load template %s from %s.' %
+                    (source, templates_dir),
+                    level=hookenv.ERROR)
+        raise e
+    content = template.render(context)
+    if target is not None:
+        target_dir = os.path.dirname(target)
+        if not os.path.exists(target_dir):
+            # This is a terrible default directory permission, as the file
+            # or its siblings will often contain secrets.
+            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
+        host.write_file(target, content.encode(encoding), owner, group, perms)
+    return content
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
new file mode 100644
index 0000000..54ec969
--- /dev/null
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -0,0 +1,518 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Authors:
+#  Kapil Thangavelu <kapil.foss@xxxxxxxxx>
+#
+"""
+Intro
+-----
+
+A simple way to store state in units. This provides a key value
+storage with support for versioned, transactional operation,
+and can calculate deltas from previous values to simplify unit logic
+when processing changes.
+
+
+Hook Integration
+----------------
+
+There are several extant frameworks for hook execution, including
+
+ - charmhelpers.core.hookenv.Hooks
+ - charmhelpers.core.services.ServiceManager
+
+The storage classes are framework agnostic, one simple integration is
+via the HookData contextmanager. It will record the current hook
+execution environment (including relation data, config data, etc.),
+setup a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       hook_data = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # Print all changes to configuration from previously seen
+           # values.
+           for changed, (prev, cur) in hook_data.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value',  cur)
+
+           # Get some unit specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+           # Directly access all charm config as a mapping.
+           conf = db.getrange('config', True)
+
+           # Directly access all relation data as a mapping
+           rels = db.getrange('rels', True)
+
+       if __name__ == '__main__':
+           with hook_data():
+               hook.execute()
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+  >>> from unitdata import kv
+  >>> db = kv()
+  >>> with db.hook_scope('install'):
+  ...    # do work, in transactional scope.
+  ...    db.set('x', 1)
+  >>> db.get('x')
+  1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data struct capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+   >>> kv.set('y', True)
+   >>> kv.get('y')
+   True
+
+   # We can set complex values (dicts, lists) as a single key.
+   >>> kv.set('config', {'a': 1, 'b': True})
+
+   # Also supports returning dictionaries as a record which
+   # provides attribute access.
+   >>> config = kv.get('config', record=True)
+   >>> config.b
+   True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+   >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+   >>> kv.getrange('gui.', strip=True)
+   {'z': 1, 'y': 2}
+
+When updating values, its very helpful to understand which values
+have actually changed and how have they changed. The storage
+provides a delta method to provide for this::
+
+   >>> data = {'debug': True, 'option': 2}
+   >>> delta = kv.delta(data, 'config.')
+   >>> delta.debug.previous
+   None
+   >>> delta.debug.current
+   True
+   >>> delta
+   {'debug': (None, True), 'option': (None, 2)}
+
+Note the delta method does not persist the actual change, it needs to
+be explicitly saved via 'update' method::
+
+   >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated to the hookname.
+
+   >>> with db.hook_scope('config-changed'):
+   ...      db.set('x', 42)
+   >>> db.gethistory('x')
+   [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+    (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu <kapil.foss@xxxxxxxxx>'
+
+
+class Storage(object):
+    """Simple key value database for local unit state within charms.
+
+    Modifications are not persisted unless :meth:`flush` is called.
+
+    To support dicts, lists, integer, floats, and booleans values
+    are automatically json encoded/decoded.
+    """
+    def __init__(self, path=None):
+        """Open (or create) the backing sqlite database.
+
+        :param path: path to the db file; when None, falls back to
+            $UNIT_STATE_DB, then $CHARM_DIR/.unit-state.db.
+        """
+        self.db_path = path
+        if path is None:
+            if 'UNIT_STATE_DB' in os.environ:
+                self.db_path = os.environ['UNIT_STATE_DB']
+            else:
+                self.db_path = os.path.join(
+                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+        self.conn = sqlite3.connect('%s' % self.db_path)
+        self.cursor = self.conn.cursor()
+        # revision is set while inside a hook_scope(); None otherwise.
+        self.revision = None
+        self._closed = False
+        self._init()
+
+    def close(self):
+        """Close the db connection, discarding any unflushed changes."""
+        if self._closed:
+            return
+        # flush(False) rolls back anything not explicitly committed.
+        self.flush(False)
+        self.cursor.close()
+        self.conn.close()
+        self._closed = True
+
+    def get(self, key, default=None, record=False):
+        """Fetch a single key's JSON-decoded value, or `default` if absent.
+
+        With record=True the value is wrapped in a Record (attribute-access
+        dict, defined elsewhere in this module).
+        """
+        self.cursor.execute('select data from kv where key=?', [key])
+        result = self.cursor.fetchone()
+        if not result:
+            return default
+        if record:
+            return Record(json.loads(result[0]))
+        return json.loads(result[0])
+
+    def getrange(self, key_prefix, strip=False):
+        """
+        Get a range of keys starting with a common prefix as a mapping of
+        keys to values.
+
+        :param str key_prefix: Common prefix among all keys
+        :param bool strip: Optionally strip the common prefix from the key
+            names in the returned dict
+        :return dict: A (possibly empty) dict of key-value mappings
+        """
+        # LIKE 'prefix%' matches all keys sharing the prefix.
+        self.cursor.execute("select key, data from kv where key like ?",
+                            ['%s%%' % key_prefix])
+        result = self.cursor.fetchall()
+
+        if not result:
+            return {}
+        if not strip:
+            # Slicing with len('') below then leaves keys untouched.
+            key_prefix = ''
+        return dict([
+            (k[len(key_prefix):], json.loads(v)) for k, v in result])
+
+    def update(self, mapping, prefix=""):
+        """
+        Set the values of multiple keys at once.
+
+        :param dict mapping: Mapping of keys to values
+        :param str prefix: Optional prefix to apply to all keys in `mapping`
+            before setting
+        """
+        # Delegates to set() so per-key revision history is recorded too.
+        for k, v in mapping.items():
+            self.set("%s%s" % (prefix, k), v)
+
+    def unset(self, key):
+        """
+        Remove a key from the database entirely.
+        """
+        self.cursor.execute('delete from kv where key=?', [key])
+        # Inside a hook_scope, record the deletion in the revision history
+        # (only if a row was actually removed).
+        if self.revision and self.cursor.rowcount:
+            self.cursor.execute(
+                'insert into kv_revisions values (?, ?, ?)',
+                [key, self.revision, json.dumps('DELETED')])
+
+    def unsetrange(self, keys=None, prefix=""):
+        """
+        Remove a range of keys starting with a common prefix, from the database
+        entirely.
+
+        :param list keys: List of keys to remove.
+        :param str prefix: Optional prefix to apply to all keys in ``keys``
+            before removing.  When ``keys`` is None, every key matching the
+            prefix is removed.
+        """
+        if keys is not None:
+            keys = ['%s%s' % (prefix, key) for key in keys]
+            # Build an IN (?,?,...) clause sized to the key list.
+            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
+            if self.revision and self.cursor.rowcount:
+                self.cursor.execute(
+                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
+                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
+        else:
+            self.cursor.execute('delete from kv where key like ?',
+                                ['%s%%' % prefix])
+            if self.revision and self.cursor.rowcount:
+                # Prefix deletes are recorded as a single 'prefix%' sentinel row.
+                self.cursor.execute(
+                    'insert into kv_revisions values (?, ?, ?)',
+                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
+
+    def set(self, key, value):
+        """
+        Set a value in the database.
+
+        :param str key: Key to set the value for
+        :param value: Any JSON-serializable value to be set
+        :returns: the value, unchanged
+        """
+        serialized = json.dumps(value)
+
+        self.cursor.execute('select data from kv where key=?', [key])
+        exists = self.cursor.fetchone()
+
+        # Skip mutations to the same value
+        if exists:
+            if exists[0] == serialized:
+                return value
+
+        if not exists:
+            self.cursor.execute(
+                'insert into kv (key, data) values (?, ?)',
+                (key, serialized))
+        else:
+            self.cursor.execute('''
+            update kv
+            set data = ?
+            where key = ?''', [serialized, key])
+
+        # Save
+        # Outside a hook_scope there is no revision, so no history is kept.
+        if not self.revision:
+            return value
+
+        # Upsert into the per-revision history table.
+        self.cursor.execute(
+            'select 1 from kv_revisions where key=? and revision=?',
+            [key, self.revision])
+        exists = self.cursor.fetchone()
+
+        if not exists:
+            self.cursor.execute(
+                '''insert into kv_revisions (
+                revision, key, data) values (?, ?, ?)''',
+                (self.revision, key, serialized))
+        else:
+            self.cursor.execute(
+                '''
+                update kv_revisions
+                set data = ?
+                where key = ?
+                and   revision = ?''',
+                [serialized, key, self.revision])
+
+        return value
+
+    def delta(self, mapping, prefix):
+        """
+        return a delta containing values that have changed.
+
+        Compares `mapping` against the stored values under `prefix` and
+        returns a DeltaSet of Delta(previous, current) pairs (both types
+        defined elsewhere in this module).  Nothing is persisted here —
+        call update() to save the new values.
+        """
+        previous = self.getrange(prefix, strip=True)
+        if not previous:
+            pk = set()
+        else:
+            pk = set(previous.keys())
+        ck = set(mapping.keys())
+        delta = DeltaSet()
+
+        # added
+        for k in ck.difference(pk):
+            delta[k] = Delta(None, mapping[k])
+
+        # removed
+        for k in pk.difference(ck):
+            delta[k] = Delta(previous[k], None)
+
+        # changed
+        for k in pk.intersection(ck):
+            c = mapping[k]
+            p = previous[k]
+            if c != p:
+                delta[k] = Delta(p, c)
+
+        return delta
+
+    @contextlib.contextmanager
+    def hook_scope(self, name=""):
+        """Scope all future interactions to the current hook execution
+        revision.
+
+        Records the hook name (defaulting to sys.argv[0]) and a UTC
+        timestamp, commits on clean exit, rolls back on error.  Scopes
+        cannot be nested (asserted below).
+        """
+        assert not self.revision
+        self.cursor.execute(
+            'insert into hooks (hook, date) values (?, ?)',
+            (name or sys.argv[0],
+             datetime.datetime.utcnow().isoformat()))
+        self.revision = self.cursor.lastrowid
+        try:
+            yield self.revision
+            self.revision = None
+        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
+        # harmless here since it re-raises after rolling back.
+        except:
+            self.flush(False)
+            self.revision = None
+            raise
+        else:
+            self.flush()
+
+    def flush(self, save=True):
+        if save:
+            self.conn.commit()
+        elif self._closed:
+            return
+        else:
+            self.conn.rollback()
+
+    def _init(self):
+        self.cursor.execute('''
+            create table if not exists kv (
+               key text,
+               data text,
+               primary key (key)
+               )''')
+        self.cursor.execute('''
+            create table if not exists kv_revisions (
+               key text,
+               revision integer,
+               data text,
+               primary key (key, revision)
+               )''')
+        self.cursor.execute('''
+            create table if not exists hooks (
+               version integer primary key autoincrement,
+               hook text,
+               date text
+               )''')
+        self.conn.commit()
+
+    def gethistory(self, key, deserialize=False):
+        self.cursor.execute(
+            '''
+            select kv.revision, kv.key, kv.data, h.hook, h.date
+            from kv_revisions kv,
+                 hooks h
+            where kv.key=?
+             and kv.revision = h.version
+            ''', [key])
+        if deserialize is False:
+            return self.cursor.fetchall()
+        return map(_parse_history, self.cursor.fetchall())
+
+    def debug(self, fh=sys.stderr):
+        self.cursor.execute('select * from kv')
+        pprint.pprint(self.cursor.fetchall(), stream=fh)
+        self.cursor.execute('select * from kv_revisions')
+        pprint.pprint(self.cursor.fetchall(), stream=fh)
+
+
+def _parse_history(d):
+    return (d[0], d[1], json.loads(d[2]), d[3],
+            datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
+
+
+class HookData(object):
+    """Simple integration for existing hook exec frameworks.
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       changes = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # View all changes to configuration
+           for changed, (prev, cur) in changes.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value',  cur)
+
+           # Get some unit-specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+       if __name__ == '__main__':
+           with changes():
+               hooks.execute()
+
+    """
+    def __init__(self):
+        self.kv = kv()
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions.. charm revisions are meaningless
+        # to charm authors as they don't control the revision.
+        # so logic dependent on revision is not particularly
+        # useful, however it is useful for debugging analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+
+    __slots__ = ()
+
+
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+_KV = None
+
+
+def kv():
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
index 07bb707..480a627 100644
--- a/hooks/charmhelpers/fetch/__init__.py
+++ b/hooks/charmhelpers/fetch/__init__.py
@@ -1,267 +1,191 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import importlib
+from charmhelpers.osplatform import get_platform
 from yaml import safe_load
-from charmhelpers.core.host import (
-    lsb_release
-)
-from urlparse import (
-    urlparse,
-    urlunparse,
-)
-import subprocess
 from charmhelpers.core.hookenv import (
     config,
     log,
 )
-import apt_pkg
-import os
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
-    # Folsom
-    'folsom': 'precise-updates/folsom',
-    'precise-folsom': 'precise-updates/folsom',
-    'precise-folsom/updates': 'precise-updates/folsom',
-    'precise-updates/folsom': 'precise-updates/folsom',
-    'folsom/proposed': 'precise-proposed/folsom',
-    'precise-folsom/proposed': 'precise-proposed/folsom',
-    'precise-proposed/folsom': 'precise-proposed/folsom',
-    # Grizzly
-    'grizzly': 'precise-updates/grizzly',
-    'precise-grizzly': 'precise-updates/grizzly',
-    'precise-grizzly/updates': 'precise-updates/grizzly',
-    'precise-updates/grizzly': 'precise-updates/grizzly',
-    'grizzly/proposed': 'precise-proposed/grizzly',
-    'precise-grizzly/proposed': 'precise-proposed/grizzly',
-    'precise-proposed/grizzly': 'precise-proposed/grizzly',
-    # Havana
-    'havana': 'precise-updates/havana',
-    'precise-havana': 'precise-updates/havana',
-    'precise-havana/updates': 'precise-updates/havana',
-    'precise-updates/havana': 'precise-updates/havana',
-    'havana/proposed': 'precise-proposed/havana',
-    'precise-havana/proposed': 'precise-proposed/havana',
-    'precise-proposed/havana': 'precise-proposed/havana',
-    # Icehouse
-    'icehouse': 'precise-updates/icehouse',
-    'precise-icehouse': 'precise-updates/icehouse',
-    'precise-icehouse/updates': 'precise-updates/icehouse',
-    'precise-updates/icehouse': 'precise-updates/icehouse',
-    'icehouse/proposed': 'precise-proposed/icehouse',
-    'precise-icehouse/proposed': 'precise-proposed/icehouse',
-    'precise-proposed/icehouse': 'precise-proposed/icehouse',
-}
-
-
-def filter_installed_packages(packages):
-    """Returns a list of packages that require installation"""
-    apt_pkg.init()
-    cache = apt_pkg.Cache()
-    _pkgs = []
-    for package in packages:
-        try:
-            p = cache[package]
-            p.current_ver or _pkgs.append(package)
-        except KeyError:
-            log('Package {} has no installation candidate.'.format(package),
-                level='WARNING')
-            _pkgs.append(package)
-    return _pkgs
 
+import six
+if six.PY3:
+    from urllib.parse import urlparse, urlunparse
+else:
+    from urlparse import urlparse, urlunparse
 
-def apt_install(packages, options=None, fatal=False):
-    """Install one or more packages"""
-    if options is None:
-        options = ['--option=Dpkg::Options::=--force-confold']
 
-    cmd = ['apt-get', '--assume-yes']
-    cmd.extend(options)
-    cmd.append('install')
-    if isinstance(packages, basestring):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-    log("Installing {} with options: {}".format(packages,
-                                                options))
-    env = os.environ.copy()
-    if 'DEBIAN_FRONTEND' not in env:
-        env['DEBIAN_FRONTEND'] = 'noninteractive'
-
-    if fatal:
-        subprocess.check_call(cmd, env=env)
-    else:
-        subprocess.call(cmd, env=env)
+# The order of this list is very important. Handlers should be listed in from
+# least- to most-specific URL matching.
+FETCH_HANDLERS = (
+    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
+    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
+)
 
 
-def apt_update(fatal=False):
-    """Update local apt cache"""
-    cmd = ['apt-get', 'update']
-    if fatal:
-        subprocess.check_call(cmd)
-    else:
-        subprocess.call(cmd)
+class SourceConfigError(Exception):
+    pass
 
 
-def apt_purge(packages, fatal=False):
-    """Purge one or more packages"""
-    cmd = ['apt-get', '--assume-yes', 'purge']
-    if isinstance(packages, basestring):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-    log("Purging {}".format(packages))
-    if fatal:
-        subprocess.check_call(cmd)
-    else:
-        subprocess.call(cmd)
+class UnhandledSource(Exception):
+    pass
 
 
-def apt_hold(packages, fatal=False):
-    """Hold one or more packages"""
-    cmd = ['apt-mark', 'hold']
-    if isinstance(packages, basestring):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-    log("Holding {}".format(packages))
-    if fatal:
-        subprocess.check_call(cmd)
-    else:
-        subprocess.call(cmd)
-
-
-def add_source(source, key=None):
-    if (source.startswith('ppa:') or
-        source.startswith('http') or
-        source.startswith('deb ') or
-            source.startswith('cloud-archive:')):
-        subprocess.check_call(['add-apt-repository', '--yes', source])
-    elif source.startswith('cloud:'):
-        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
-                    fatal=True)
-        pocket = source.split(':')[-1]
-        if pocket not in CLOUD_ARCHIVE_POCKETS:
-            raise SourceConfigError(
-                'Unsupported cloud: source option %s' %
-                pocket)
-        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
-    elif source == 'proposed':
-        release = lsb_release()['DISTRIB_CODENAME']
-        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
-            apt.write(PROPOSED_POCKET.format(release))
-    if key:
-        subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                               'keyserver.ubuntu.com', '--recv',
-                               key])
+class AptLockError(Exception):
+    pass
 
 
-class SourceConfigError(Exception):
+class GPGKeyError(Exception):
+    """Exception occurs when a GPG key cannot be fetched or used.  The message
+    indicates what the problem is.
+    """
     pass
 
 
+class BaseFetchHandler(object):
+
+    """Base class for FetchHandler implementations in fetch plugins"""
+
+    def can_handle(self, source):
+        """Returns True if the source can be handled. Otherwise returns
+        a string explaining why it cannot"""
+        return "Wrong source type"
+
+    def install(self, source):
+        """Try to download and unpack the source. Return the path to the
+        unpacked files or raise UnhandledSource."""
+        raise UnhandledSource("Wrong source type {}".format(source))
+
+    def parse_url(self, url):
+        return urlparse(url)
+
+    def base_url(self, url):
+        """Return url without querystring or fragment"""
+        parts = list(self.parse_url(url))
+        parts[4:] = ['' for i in parts[4:]]
+        return urlunparse(parts)
+
+
+__platform__ = get_platform()
+module = "charmhelpers.fetch.%s" % __platform__
+fetch = importlib.import_module(module)
+
+filter_installed_packages = fetch.filter_installed_packages
+install = fetch.apt_install
+upgrade = fetch.apt_upgrade
+update = _fetch_update = fetch.apt_update
+purge = fetch.apt_purge
+add_source = fetch.add_source
+
+if __platform__ == "ubuntu":
+    apt_cache = fetch.apt_cache
+    apt_install = fetch.apt_install
+    apt_update = fetch.apt_update
+    apt_upgrade = fetch.apt_upgrade
+    apt_purge = fetch.apt_purge
+    apt_mark = fetch.apt_mark
+    apt_hold = fetch.apt_hold
+    apt_unhold = fetch.apt_unhold
+    import_key = fetch.import_key
+    get_upstream_version = fetch.get_upstream_version
+elif __platform__ == "centos":
+    yum_search = fetch.yum_search
+
+
 def configure_sources(update=False,
                       sources_var='install_sources',
                       keys_var='install_keys'):
-    """
-    Configure multiple sources from charm configuration
+    """Configure multiple sources from charm configuration.
+
+    The lists are encoded as yaml fragments in the configuration.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().
 
     Example config:
-        install_sources:
+        install_sources: |
           - "ppa:foo"
           - "http://example.com/repo precise main"
-        install_keys:
+        install_keys: |
           - null
           - "a1b2c3d4"
 
     Note that 'null' (a.k.a. None) should not be quoted.
     """
-    sources = safe_load(config(sources_var))
-    keys = config(keys_var)
-    if keys is not None:
-        keys = safe_load(keys)
-    if isinstance(sources, basestring) and (
-            keys is None or isinstance(keys, basestring)):
-        add_source(sources, keys)
-    else:
-        if not len(sources) == len(keys):
-            msg = 'Install sources and keys lists are different lengths'
-            raise SourceConfigError(msg)
-        for src_num in range(len(sources)):
-            add_source(sources[src_num], keys[src_num])
-    if update:
-        apt_update(fatal=True)
+    sources = safe_load((config(sources_var) or '').strip()) or []
+    keys = safe_load((config(keys_var) or '').strip()) or None
 
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
-    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
-    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
-)
+    if isinstance(sources, six.string_types):
+        sources = [sources]
 
+    if keys is None:
+        for source in sources:
+            add_source(source, None)
+    else:
+        if isinstance(keys, six.string_types):
+            keys = [keys]
 
-class UnhandledSource(Exception):
-    pass
+        if len(sources) != len(keys):
+            raise SourceConfigError(
+                'Install sources and keys lists are different lengths')
+        for source, key in zip(sources, keys):
+            add_source(source, key)
+    if update:
+        _fetch_update(fatal=True)
 
 
-def install_remote(source):
-    """
-    Install a file tree from a remote source
+def install_remote(source, *args, **kwargs):
+    """Install a file tree from a remote source.
 
     The specified source should be a url of the form:
         scheme://[host]/path[#[option=value][&...]]
 
-    Schemes supported are based on this modules submodules
-    Options supported are submodule-specific"""
+    Schemes supported are based on this module's submodules.
+    Options supported are submodule-specific.
+    Additional arguments are passed through to the submodule.
+
+    For example::
+
+        dest = install_remote('http://example.com/archive.tgz',
+                              checksum='deadbeef',
+                              hash_type='sha1')
+
+    This will download `archive.tgz`, validate it using SHA1 and, if
+    the file is ok, extract it and return the directory in which it
+    was extracted.  If the checksum fails, it will raise
+    :class:`charmhelpers.core.host.ChecksumError`.
+    """
     # We ONLY check for True here because can_handle may return a string
     # explaining why it can't handle a given source.
     handlers = [h for h in plugins() if h.can_handle(source) is True]
-    installed_to = None
     for handler in handlers:
         try:
-            installed_to = handler.install(source)
-        except UnhandledSource:
-            pass
-    if not installed_to:
-        raise UnhandledSource("No handler found for source {}".format(source))
-    return installed_to
+            return handler.install(source, *args, **kwargs)
+        except UnhandledSource as e:
+            log('Install source attempt unsuccessful: {}'.format(e),
+                level='WARNING')
+    raise UnhandledSource("No handler found for source {}".format(source))
 
 
 def install_from_config(config_var_name):
+    """Install a file from config."""
     charm_config = config()
     source = charm_config[config_var_name]
     return install_remote(source)
 
 
-class BaseFetchHandler(object):
-
-    """Base class for FetchHandler implementations in fetch plugins"""
-
-    def can_handle(self, source):
-        """Returns True if the source can be handled. Otherwise returns
-        a string explaining why it cannot"""
-        return "Wrong source type"
-
-    def install(self, source):
-        """Try to download and unpack the source. Return the path to the
-        unpacked files or raise UnhandledSource."""
-        raise UnhandledSource("Wrong source type {}".format(source))
-
-    def parse_url(self, url):
-        return urlparse(url)
-
-    def base_url(self, url):
-        """Return url without querystring or fragment"""
-        parts = list(self.parse_url(url))
-        parts[4:] = ['' for i in parts[4:]]
-        return urlunparse(parts)
-
-
 def plugins(fetch_handlers=None):
     if not fetch_handlers:
         fetch_handlers = FETCH_HANDLERS
@@ -273,7 +197,7 @@ def plugins(fetch_handlers=None):
                 importlib.import_module(package),
                 classname)
             plugin_list.append(handler_class())
-        except (ImportError, AttributeError):
+        except NotImplementedError:
             # Skip missing plugins so that they can be ommitted from
             # installation if desired
             log("FetchHandler {} not found, skipping plugin".format(
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py
index e35b8f1..dd24f9e 100644
--- a/hooks/charmhelpers/fetch/archiveurl.py
+++ b/hooks/charmhelpers/fetch/archiveurl.py
@@ -1,5 +1,21 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import os
-import urllib2
+import hashlib
+import re
+
 from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource
@@ -8,41 +24,142 @@ from charmhelpers.payload.archive import (
     get_archive_handler,
     extract,
 )
-from charmhelpers.core.host import mkdir
+from charmhelpers.core.host import mkdir, check_hash
+
+import six
+if six.PY3:
+    from urllib.request import (
+        build_opener, install_opener, urlopen, urlretrieve,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+    )
+    from urllib.parse import urlparse, urlunparse, parse_qs
+    from urllib.error import URLError
+else:
+    from urllib import urlretrieve
+    from urllib2 import (
+        build_opener, install_opener, urlopen,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+        URLError
+    )
+    from urlparse import urlparse, urlunparse, parse_qs
+
+
+def splituser(host):
+    '''urllib.splituser(), but six's support of this seems broken'''
+    _userprog = re.compile('^(.*)@(.*)$')
+    match = _userprog.match(host)
+    if match:
+        return match.group(1, 2)
+    return None, host
+
+
+def splitpasswd(user):
+    '''urllib.splitpasswd(), but six's support of this is missing'''
+    _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
+    match = _passwdprog.match(user)
+    if match:
+        return match.group(1, 2)
+    return user, None
 
 
 class ArchiveUrlFetchHandler(BaseFetchHandler):
-    """Handler for archives via generic URLs"""
+    """
+    Handler to download archive files from arbitrary URLs.
+
+    Can fetch from http, https, ftp, and file URLs.
+
+    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
+
+    Installs the contents of the archive in $CHARM_DIR/fetched/.
+    """
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
+            # XXX: Why is this returning a boolean and a string? It's
+            # doomed to fail since "bool(can_handle('foo://'))"  will be True.
             return "Wrong source type"
         if get_archive_handler(self.base_url(source)):
             return True
         return False
 
     def download(self, source, dest):
+        """
+        Download an archive file.
+
+        :param str source: URL pointing to an archive file.
+        :param str dest: Local path location to download archive file to.
+        """
         # propogate all exceptions
         # URLError, OSError, etc
-        response = urllib2.urlopen(source)
+        proto, netloc, path, params, query, fragment = urlparse(source)
+        if proto in ('http', 'https'):
+            auth, barehost = splituser(netloc)
+            if auth is not None:
+                source = urlunparse((proto, barehost, path, params, query, fragment))
+                username, password = splitpasswd(auth)
+                passman = HTTPPasswordMgrWithDefaultRealm()
+                # Realm is set to None in add_password to force the username and password
+                # to be used whatever the realm
+                passman.add_password(None, source, username, password)
+                authhandler = HTTPBasicAuthHandler(passman)
+                opener = build_opener(authhandler)
+                install_opener(opener)
+        response = urlopen(source)
         try:
-            with open(dest, 'w') as dest_file:
+            with open(dest, 'wb') as dest_file:
                 dest_file.write(response.read())
         except Exception as e:
             if os.path.isfile(dest):
                 os.unlink(dest)
             raise e
 
-    def install(self, source):
+    # Mandatory file validation via Sha1 or MD5 hashing.
+    def download_and_validate(self, url, hashsum, validate="sha1"):
+        tempfile, headers = urlretrieve(url)
+        check_hash(tempfile, hashsum, validate)
+        return tempfile
+
+    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
+        """
+        Download and install an archive file, with optional checksum validation.
+
+        The checksum can also be given on the `source` URL's fragment.
+        For example::
+
+            handler.install('http://example.com/file.tgz#sha1=deadbeef')
+
+        :param str source: URL pointing to an archive file.
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/archives/archive_file_name`.
+        :param str checksum: If given, validate the archive file after download.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
+
+        """
         url_parts = self.parse_url(source)
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
         try:
             self.download(source, dld_file)
-        except urllib2.URLError as e:
+        except URLError as e:
             raise UnhandledSource(e.reason)
         except OSError as e:
             raise UnhandledSource(e.strerror)
-        return extract(dld_file)
+        options = parse_qs(url_parts.fragment)
+        for key, value in options.items():
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
+                if len(value) != 1:
+                    raise TypeError(
+                        "Expected 1 hash value, not %d" % len(value))
+                expected = value[0]
+                check_hash(dld_file, expected, key)
+        if checksum:
+            check_hash(dld_file, checksum, hash_type)
+        return extract(dld_file, dest)
diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py
index db5dd9a..07cd029 100644
--- a/hooks/charmhelpers/fetch/bzrurl.py
+++ b/hooks/charmhelpers/fetch/bzrurl.py
@@ -1,49 +1,76 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import os
+from subprocess import check_call
 from charmhelpers.fetch import (
     BaseFetchHandler,
-    UnhandledSource
+    UnhandledSource,
+    filter_installed_packages,
+    install,
 )
 from charmhelpers.core.host import mkdir
 
-try:
-    from bzrlib.branch import Branch
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    apt_install("python-bzrlib")
-    from bzrlib.branch import Branch
+
+if filter_installed_packages(['bzr']) != []:
+    install(['bzr'])
+    if filter_installed_packages(['bzr']) != []:
+        raise NotImplementedError('Unable to install bzr')
 
 
 class BzrUrlFetchHandler(BaseFetchHandler):
-    """Handler for bazaar branches via generic and lp URLs"""
+    """Handler for bazaar branches via generic and lp URLs."""
+
     def can_handle(self, source):
         url_parts = self.parse_url(source)
-        if url_parts.scheme not in ('bzr+ssh', 'lp'):
+        if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
             return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.bzr'))
         else:
             return True
 
-    def branch(self, source, dest):
-        url_parts = self.parse_url(source)
-        # If we use lp:branchname scheme we need to load plugins
+    def branch(self, source, dest, revno=None):
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))
-        if url_parts.scheme == "lp":
-            from bzrlib.plugin import load_plugins
-            load_plugins()
-        try:
-            remote_branch = Branch.open(source)
-            remote_branch.bzrdir.sprout(dest).open_branch()
-        except Exception as e:
-            raise e
+        cmd_opts = []
+        if revno:
+            cmd_opts += ['-r', str(revno)]
+        if os.path.exists(dest):
+            cmd = ['bzr', 'pull']
+            cmd += cmd_opts
+            cmd += ['--overwrite', '-d', dest, source]
+        else:
+            cmd = ['bzr', 'branch']
+            cmd += cmd_opts
+            cmd += [source, dest]
+        check_call(cmd)
 
-    def install(self, source):
+    def install(self, source, dest=None, revno=None):
         url_parts = self.parse_url(source)
         branch_name = url_parts.path.strip("/").split("/")[-1]
-        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
-        if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)
+
+        if dest and not os.path.exists(dest):
+            mkdir(dest, perms=0o755)
+
         try:
-            self.branch(source, dest_dir)
+            self.branch(source, dest_dir, revno)
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return dest_dir
diff --git a/hooks/charmhelpers/fetch/centos.py b/hooks/charmhelpers/fetch/centos.py
new file mode 100644
index 0000000..a91dcff
--- /dev/null
+++ b/hooks/charmhelpers/fetch/centos.py
@@ -0,0 +1,171 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import os
+import time
+import six
+import yum
+
+from tempfile import NamedTemporaryFile
+from charmhelpers.core.hookenv import log
+
+YUM_NO_LOCK = 1  # The return code for "couldn't acquire lock" in YUM.
+YUM_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between yum lock checks.
+YUM_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+
+
def filter_installed_packages(packages):
    """Return the subset of *packages* that is not yet installed.

    :param packages: iterable of package names.
    :returns: list of names absent from the yum 'installed' list.
    """
    yb = yum.YumBase()
    # Collect the base names of everything yum reports as installed.
    installed = {pkg.base_package_name
                 for pkg in yb.doPackageLists()['installed']}
    return [name for name in packages if name not in installed]
+
+
def install(packages, options=None, fatal=False):
    """Install one or more packages with yum.

    :param packages: str or list of str package names to install.
    :param options: optional list of extra command-line options for yum.
    :param fatal: when True, retry while the yum lock is held and raise
        on failure.
    """
    extra_opts = list(options) if options is not None else []
    if isinstance(packages, six.string_types):
        pkg_args = [packages]
    else:
        pkg_args = list(packages)
    log("Installing {} with options: {}".format(packages, options))
    _run_yum_command(['yum', '--assumeyes'] + extra_opts + ['install'] +
                     pkg_args, fatal)
+
+
def upgrade(options=None, fatal=False, dist=False):
    """Upgrade all installed packages with yum.

    :param options: optional list of extra command-line options for yum.
    :param fatal: when True, retry while the yum lock is held and raise
        on failure.
    :param dist: accepted for API parity with the apt backend; unused here.
    """
    extra_opts = list(options) if options is not None else []
    log("Upgrading with options: {}".format(options))
    _run_yum_command(['yum', '--assumeyes'] + extra_opts + ['upgrade'], fatal)
+
+
def update(fatal=False):
    """Refresh the local yum cache.

    :param fatal: when True, retry while the yum lock is held and raise
        on failure.
    """
    log("Update with fatal: {}".format(fatal))
    _run_yum_command(['yum', '--assumeyes', 'update'], fatal)
+
+
def purge(packages, fatal=False):
    """Remove one or more packages with yum.

    :param packages: str or list of str package names to remove.
    :param fatal: when True, retry while the yum lock is held and raise
        on failure.
    """
    if isinstance(packages, six.string_types):
        pkg_args = [packages]
    else:
        pkg_args = list(packages)
    log("Purging {}".format(packages))
    _run_yum_command(['yum', '--assumeyes', 'remove'] + pkg_args, fatal)
+
+
def yum_search(packages):
    """Search for one or more packages.

    :param packages: str or list of str package names to search for.
    :returns: dict mapping each requested package name to True if it
        appeared in the ``yum search`` output, otherwise False.
    """
    log("Searching for {}".format(packages))
    # Treat a single string as one package name; iterating a str (as the
    # previous list(packages) did) would search per-character.
    if isinstance(packages, six.string_types):
        packages = [packages]
    cmd = ['yum', 'search'] + list(packages)
    # check_output() returns bytes on Python 3; decode before the substring
    # membership test below, which would otherwise raise TypeError.
    result = subprocess.check_output(cmd).decode('utf-8', 'replace')
    return {package: package in result for package in packages}
+
+
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL with a rpm package

    @param key: A key to be added to the system's keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    if source.startswith('http'):
        directory = '/etc/yum.repos.d/'
        # Only write the repo stanza if no existing repo file mentions it.
        already_present = False
        for filename in os.listdir(directory):
            with open(directory + filename, 'r') as rpm_file:
                if source in rpm_file.read():
                    already_present = True
                    break
        if not already_present:
            log("Add source: {!r}".format(source))
            # Append the stanza to the shared charms repo file.
            with open(directory + 'Charms.repo', 'a') as rpm_file:
                rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
                rpm_file.write('name=%s\n' % source[7:])
                rpm_file.write('baseurl=%s\n\n' % source)
    else:
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            # Full ASCII-armored key: stage it in a temp file for rpm.
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['rpm', '--import', key_file.name])
        else:
            # Bare key id / URL understood directly by rpm.
            subprocess.check_call(['rpm', '--import', key])
+
+
def _run_yum_command(cmd, fatal=False):
    """Run a YUM command.

    Checks the output and retries if the fatal flag is set to True.

    :param: cmd: str: The yum command to run.
    :param: fatal: bool: Whether the command's output should be checked and
        retried.
    """
    env = os.environ.copy()

    if not fatal:
        # Best-effort: run once, ignore the exit status.
        subprocess.call(cmd, env=env)
        return

    attempts = 0
    exit_code = None
    # If the command is considered "fatal", keep retrying for as long as
    # yum reports that it could not take its lock.
    while exit_code is None or exit_code == YUM_NO_LOCK:
        try:
            exit_code = subprocess.check_call(cmd, env=env)
        except subprocess.CalledProcessError as e:
            attempts += 1
            if attempts > YUM_NO_LOCK_RETRY_COUNT:
                raise
            exit_code = e.returncode
            log("Couldn't acquire YUM lock. Will retry in {} seconds."
                "".format(YUM_NO_LOCK_RETRY_DELAY))
            time.sleep(YUM_NO_LOCK_RETRY_DELAY)
diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py
new file mode 100644
index 0000000..4cf21bc
--- /dev/null
+++ b/hooks/charmhelpers/fetch/giturl.py
@@ -0,0 +1,69 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from subprocess import check_call, CalledProcessError
+from charmhelpers.fetch import (
+    BaseFetchHandler,
+    UnhandledSource,
+    filter_installed_packages,
+    install,
+)
+
# Import-time guard: the handler below shells out to git, so make sure the
# package is present (and fail loudly if it cannot be installed).
if filter_installed_packages(['git']):
    install(['git'])
    if filter_installed_packages(['git']):
        raise NotImplementedError('Unable to install git')
+
+
class GitUrlFetchHandler(BaseFetchHandler):
    """Handler for git branches via generic and github URLs."""

    def can_handle(self, source):
        """Return True if *source* looks like a git URL or local checkout."""
        url_parts = self.parse_url(source)
        # TODO (mattyw) no support for ssh git@ yet
        if url_parts.scheme not in ('http', 'https', 'git', ''):
            return False
        elif not url_parts.scheme:
            # A bare path is only handled if it is already a git checkout.
            return os.path.exists(os.path.join(source, '.git'))
        else:
            return True

    def clone(self, source, dest, branch="master", depth=None):
        """Clone *source* into *dest*, or pull if *dest* already exists.

        :param branch: branch to clone/pull (defaults to "master").
        :param depth: optional history depth for a shallow clone.
        :raises UnhandledSource: if *source* is not a supported git URL.
        """
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))

        if os.path.exists(dest):
            cmd = ['git', '-C', dest, 'pull', source, branch]
        else:
            cmd = ['git', 'clone', source, dest, '--branch', branch]
            if depth:
                # depth is commonly passed as an int, but check_call()
                # requires string arguments — mirror the str(revno)
                # conversion done in the bzr handler.
                cmd.extend(['--depth', str(depth)])
        check_call(cmd)

    def install(self, source, branch="master", dest=None, depth=None):
        """Clone *source* and return the directory it was fetched into.

        :param dest: parent directory to fetch into; defaults to
            $CHARM_DIR/fetched.
        :raises UnhandledSource: if the clone/pull fails.
        """
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        try:
            self.clone(source, dest_dir, branch, depth)
        except CalledProcessError as e:
            raise UnhandledSource(e)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py
new file mode 100644
index 0000000..112a54c
--- /dev/null
+++ b/hooks/charmhelpers/fetch/snap.py
@@ -0,0 +1,134 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+import os
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg <joseph.borg@canonical.com>'
+
+# The return code for "couldn't acquire lock" in Snap
+# (hopefully this will be improved).
+SNAP_NO_LOCK = 1
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+SNAP_CHANNELS = [
+    'edge',
+    'beta',
+    'candidate',
+    'stable',
+]
+
+
class CouldNotAcquireLockException(Exception):
    """Raised when the snap lock cannot be acquired after retrying."""
+
+
def _snap_exec(commands):
    """
    Execute snap commands, retrying while the snap lock is held.

    :param commands: List commands
    :return: Integer exit code
    :raises CouldNotAcquireLockException: if the lock is still held after
        SNAP_NO_LOCK_RETRY_COUNT attempts.
    """
    assert type(commands) == list

    retry_count = 0
    return_code = None

    while return_code is None or return_code == SNAP_NO_LOCK:
        try:
            return_code = subprocess.check_call(['snap'] + commands,
                                                env=os.environ)
        except subprocess.CalledProcessError as e:
            retry_count += 1
            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
                raise CouldNotAcquireLockException(
                    'Could not acquire lock after {} attempts'
                    .format(SNAP_NO_LOCK_RETRY_COUNT))
            return_code = e.returncode
            # BUG FIX: level='WARN' was previously passed to str.format()
            # (where it was silently ignored) rather than to log().
            log('Snap failed to acquire lock, trying again in {} seconds.'
                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
            sleep(SNAP_NO_LOCK_RETRY_DELAY)

    return return_code
+
+
def snap_install(packages, *flags):
    """Install one or more snap packages.

    :param packages: String or List String package name
    :param flags: List String flags to pass to install command
    :return: Integer return code from snap
    """
    pkgs = packages if type(packages) is list else [packages]
    opts = list(flags)

    msg = 'Installing snap(s) "%s"' % ', '.join(pkgs)
    if opts:
        msg += ' with option(s) "%s"' % ', '.join(opts)

    log(msg, level='INFO')
    return _snap_exec(['install'] + opts + pkgs)
+
+
def snap_remove(packages, *flags):
    """Remove one or more snap packages.

    :param packages: String or List String package name
    :param flags: List String flags to pass to remove command
    :return: Integer return code from snap
    """
    pkgs = packages if type(packages) is list else [packages]
    opts = list(flags)

    msg = 'Removing snap(s) "%s"' % ', '.join(pkgs)
    if opts:
        msg += ' with options "%s"' % ', '.join(opts)

    log(msg, level='INFO')
    return _snap_exec(['remove'] + opts + pkgs)
+
+
def snap_refresh(packages, *flags):
    """Refresh (update) one or more snap packages.

    :param packages: String or List String package name
    :param flags: List String flags to pass to refresh command
    :return: Integer return code from snap
    """
    pkgs = packages if type(packages) is list else [packages]
    opts = list(flags)

    msg = 'Refreshing snap(s) "%s"' % ', '.join(pkgs)
    if opts:
        msg += ' with options "%s"' % ', '.join(opts)

    log(msg, level='INFO')
    return _snap_exec(['refresh'] + opts + pkgs)
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
new file mode 100644
index 0000000..40e1cb5
--- /dev/null
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -0,0 +1,583 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict
+import os
+import platform
+import re
+import six
+import time
+import subprocess
+from tempfile import NamedTemporaryFile
+
+from charmhelpers.core.host import (
+    lsb_release
+)
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.fetch import SourceConfigError, GPGKeyError
+
+PROPOSED_POCKET = (
+    "# Proposed\n"
+    "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe "
+    "multiverse restricted\n")
+PROPOSED_PORTS_POCKET = (
+    "# Proposed\n"
+    "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe "
+    "multiverse restricted\n")
+# Only supports 64bit and ppc64 at the moment.
+ARCH_TO_PROPOSED_POCKET = {
+    'x86_64': PROPOSED_POCKET,
+    'ppc64le': PROPOSED_PORTS_POCKET,
+    'aarch64': PROPOSED_PORTS_POCKET,
+}
+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
+CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
+deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+"""
+CLOUD_ARCHIVE_POCKETS = {
+    # Folsom
+    'folsom': 'precise-updates/folsom',
+    'folsom/updates': 'precise-updates/folsom',
+    'precise-folsom': 'precise-updates/folsom',
+    'precise-folsom/updates': 'precise-updates/folsom',
+    'precise-updates/folsom': 'precise-updates/folsom',
+    'folsom/proposed': 'precise-proposed/folsom',
+    'precise-folsom/proposed': 'precise-proposed/folsom',
+    'precise-proposed/folsom': 'precise-proposed/folsom',
+    # Grizzly
+    'grizzly': 'precise-updates/grizzly',
+    'grizzly/updates': 'precise-updates/grizzly',
+    'precise-grizzly': 'precise-updates/grizzly',
+    'precise-grizzly/updates': 'precise-updates/grizzly',
+    'precise-updates/grizzly': 'precise-updates/grizzly',
+    'grizzly/proposed': 'precise-proposed/grizzly',
+    'precise-grizzly/proposed': 'precise-proposed/grizzly',
+    'precise-proposed/grizzly': 'precise-proposed/grizzly',
+    # Havana
+    'havana': 'precise-updates/havana',
+    'havana/updates': 'precise-updates/havana',
+    'precise-havana': 'precise-updates/havana',
+    'precise-havana/updates': 'precise-updates/havana',
+    'precise-updates/havana': 'precise-updates/havana',
+    'havana/proposed': 'precise-proposed/havana',
+    'precise-havana/proposed': 'precise-proposed/havana',
+    'precise-proposed/havana': 'precise-proposed/havana',
+    # Icehouse
+    'icehouse': 'precise-updates/icehouse',
+    'icehouse/updates': 'precise-updates/icehouse',
+    'precise-icehouse': 'precise-updates/icehouse',
+    'precise-icehouse/updates': 'precise-updates/icehouse',
+    'precise-updates/icehouse': 'precise-updates/icehouse',
+    'icehouse/proposed': 'precise-proposed/icehouse',
+    'precise-icehouse/proposed': 'precise-proposed/icehouse',
+    'precise-proposed/icehouse': 'precise-proposed/icehouse',
+    # Juno
+    'juno': 'trusty-updates/juno',
+    'juno/updates': 'trusty-updates/juno',
+    'trusty-juno': 'trusty-updates/juno',
+    'trusty-juno/updates': 'trusty-updates/juno',
+    'trusty-updates/juno': 'trusty-updates/juno',
+    'juno/proposed': 'trusty-proposed/juno',
+    'trusty-juno/proposed': 'trusty-proposed/juno',
+    'trusty-proposed/juno': 'trusty-proposed/juno',
+    # Kilo
+    'kilo': 'trusty-updates/kilo',
+    'kilo/updates': 'trusty-updates/kilo',
+    'trusty-kilo': 'trusty-updates/kilo',
+    'trusty-kilo/updates': 'trusty-updates/kilo',
+    'trusty-updates/kilo': 'trusty-updates/kilo',
+    'kilo/proposed': 'trusty-proposed/kilo',
+    'trusty-kilo/proposed': 'trusty-proposed/kilo',
+    'trusty-proposed/kilo': 'trusty-proposed/kilo',
+    # Liberty
+    'liberty': 'trusty-updates/liberty',
+    'liberty/updates': 'trusty-updates/liberty',
+    'trusty-liberty': 'trusty-updates/liberty',
+    'trusty-liberty/updates': 'trusty-updates/liberty',
+    'trusty-updates/liberty': 'trusty-updates/liberty',
+    'liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-proposed/liberty': 'trusty-proposed/liberty',
+    # Mitaka
+    'mitaka': 'trusty-updates/mitaka',
+    'mitaka/updates': 'trusty-updates/mitaka',
+    'trusty-mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka/updates': 'trusty-updates/mitaka',
+    'trusty-updates/mitaka': 'trusty-updates/mitaka',
+    'mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
+    # Newton
+    'newton': 'xenial-updates/newton',
+    'newton/updates': 'xenial-updates/newton',
+    'xenial-newton': 'xenial-updates/newton',
+    'xenial-newton/updates': 'xenial-updates/newton',
+    'xenial-updates/newton': 'xenial-updates/newton',
+    'newton/proposed': 'xenial-proposed/newton',
+    'xenial-newton/proposed': 'xenial-proposed/newton',
+    'xenial-proposed/newton': 'xenial-proposed/newton',
+    # Ocata
+    'ocata': 'xenial-updates/ocata',
+    'ocata/updates': 'xenial-updates/ocata',
+    'xenial-ocata': 'xenial-updates/ocata',
+    'xenial-ocata/updates': 'xenial-updates/ocata',
+    'xenial-updates/ocata': 'xenial-updates/ocata',
+    'ocata/proposed': 'xenial-proposed/ocata',
+    'xenial-ocata/proposed': 'xenial-proposed/ocata',
+    'xenial-proposed/ocata': 'xenial-proposed/ocata',
+    # Pike
+    'pike': 'xenial-updates/pike',
+    'xenial-pike': 'xenial-updates/pike',
+    'xenial-pike/updates': 'xenial-updates/pike',
+    'xenial-updates/pike': 'xenial-updates/pike',
+    'pike/proposed': 'xenial-proposed/pike',
+    'xenial-pike/proposed': 'xenial-proposed/pike',
+    'xenial-proposed/pike': 'xenial-proposed/pike',
+    # Queens
+    'queens': 'xenial-updates/queens',
+    'xenial-queens': 'xenial-updates/queens',
+    'xenial-queens/updates': 'xenial-updates/queens',
+    'xenial-updates/queens': 'xenial-updates/queens',
+    'queens/proposed': 'xenial-proposed/queens',
+    'xenial-queens/proposed': 'xenial-proposed/queens',
+    'xenial-proposed/queens': 'xenial-proposed/queens',
+}
+
+
+APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
+CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
+CMD_RETRY_COUNT = 3  # Retry a failing fatal command X times.
+
+
def filter_installed_packages(packages):
    """Return a list of packages that require installation."""
    cache = apt_cache()
    missing = []
    for name in packages:
        try:
            # No current_ver means the package is known but not installed.
            if not cache[name].current_ver:
                missing.append(name)
        except KeyError:
            log('Package {} has no installation candidate.'.format(name),
                level='WARNING')
            missing.append(name)
    return missing
+
+
def apt_cache(in_memory=True, progress=None):
    """Build and return an apt cache.

    :param in_memory: keep the cache off disk when True.
    :param progress: optional apt progress reporter.
    """
    from apt import apt_pkg
    apt_pkg.init()
    if in_memory:
        # Blank both on-disk cache paths so apt builds the cache in memory.
        for option in ("Dir::Cache::pkgcache", "Dir::Cache::srcpkgcache"):
            apt_pkg.config.set(option, "")
    return apt_pkg.Cache(progress)
+
+
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages.

    :param packages: str or list of str package names to install.
    :param options: extra apt-get options; defaults to keeping existing
        conffiles on upgrade.
    :param fatal: when True, retry on apt lock contention and raise on
        failure.
    """
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    if isinstance(packages, six.string_types):
        pkg_args = [packages]
    else:
        pkg_args = list(packages)
    log("Installing {} with options: {}".format(packages, options))
    _run_apt_command(['apt-get', '--assume-yes'] + list(options) +
                     ['install'] + pkg_args, fatal)
+
+
def apt_upgrade(options=None, fatal=False, dist=False):
    """Upgrade all packages.

    :param options: extra apt-get options; defaults to keeping existing
        conffiles on upgrade.
    :param fatal: when True, retry on apt lock contention and raise on
        failure.
    :param dist: run 'dist-upgrade' instead of 'upgrade' when True.
    """
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    action = 'dist-upgrade' if dist else 'upgrade'
    log("Upgrading with options: {}".format(options))
    _run_apt_command(['apt-get', '--assume-yes'] + list(options) + [action],
                     fatal)
+
+
def apt_update(fatal=False):
    """Refresh the local apt package index."""
    _run_apt_command(['apt-get', 'update'], fatal)
+
+
def apt_purge(packages, fatal=False):
    """Purge one or more packages (remove including config files).

    :param packages: str or list of str package names to purge.
    :param fatal: when True, retry on apt lock contention and raise on
        failure.
    """
    if isinstance(packages, six.string_types):
        pkg_args = [packages]
    else:
        pkg_args = list(packages)
    log("Purging {}".format(packages))
    _run_apt_command(['apt-get', '--assume-yes', 'purge'] + pkg_args, fatal)
+
+
def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark.

    :param packages: str or list of str package names.
    :param mark: the apt-mark state to apply (e.g. 'hold', 'unhold').
    :param fatal: raise on a non-zero exit status when True.
    """
    log("Marking {} as {}".format(packages, mark))
    if isinstance(packages, six.string_types):
        pkg_args = [packages]
    else:
        pkg_args = list(packages)

    # check_call raises on failure; plain call is best-effort.
    runner = subprocess.check_call if fatal else subprocess.call
    runner(['apt-mark', mark] + pkg_args, universal_newlines=True)
+
+
def apt_hold(packages, fatal=False):
    """Mark one or more packages 'hold' so apt will not upgrade them."""
    return apt_mark(packages, 'hold', fatal=fatal)
+
+
def apt_unhold(packages, fatal=False):
    """Clear the 'hold' mark so apt may upgrade the packages again."""
    return apt_mark(packages, 'unhold', fatal=fatal)
+
+
def import_key(key):
    """Import an ASCII Armor key.

    /!\ A Radix64 format keyid is also supported for backwards
    compatibility, but should never be used; the key retrieval
    mechanism is insecure and subject to man-in-the-middle attacks
    voiding all signature checks using that key.

    :param key: The key in ASCII armor format,
                including BEGIN and END markers, or a Radix64 keyid.
    :raises: GPGKeyError if the key could not be imported
    """
    key = key.strip()
    if '-' in key or '\n' in key:
        # Send everything not obviously a keyid to GPG to import, as
        # we trust its validation better than our own. eg. handling
        # comments before the key.
        log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
        if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
                '-----END PGP PUBLIC KEY BLOCK-----' in key):
            log("Importing ASCII Armor PGP key", level=DEBUG)
            with NamedTemporaryFile() as keyfile:
                with open(keyfile.name, 'w') as fd:
                    fd.write(key)
                    fd.write("\n")
                cmd = ['apt-key', 'add', keyfile.name]
                try:
                    subprocess.check_call(cmd)
                except subprocess.CalledProcessError:
                    error = "Error importing PGP key '{}'".format(key)
                    log(error)
                    raise GPGKeyError(error)
        else:
            raise GPGKeyError("ASCII armor markers missing from GPG key")
    else:
        # We should only send things obviously not a keyid offsite
        # via this unsecured protocol, as it may be a secret or part
        # of one.
        log("PGP key found (looks like Radix64 format)", level=WARNING)
        # Typo fix: message previously read "INSECURLY".
        log("INSECURELY importing PGP key from keyserver; "
            "full key not provided.", level=WARNING)
        cmd = ['apt-key', 'adv', '--keyserver',
               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            error = "Error importing PGP key '{}'".format(key)
            log(error)
            raise GPGKeyError(error)
+
+
def add_source(source, key=None, fail_invalid=False):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private-ppa.launchpad.net/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
        'distro' may be used as a noop

    Full list of source specifications supported by the function are:

    'distro': A NOP; i.e. it has no effect.
    'proposed': the proposed deb spec [2] is written to
      /etc/apt/sources.list/proposed
    'distro-proposed': adds <version>-proposed to the debs [2]
    'ppa:<ppa-name>': add-apt-repository --yes <ppa_name>
    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
    'http://....': add-apt-repository --yes http://...
    'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec>
    'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
      optional staging version.  If staging is used then the staging PPA [2]
      will be used.  If staging is NOT used then the cloud archive [3] will be
      added, and the 'ubuntu-cloud-keyring' package will be added for the
      current distro.

    Otherwise the source is not recognised and this is logged to the juju log.
    However, no error is raised, unless fail_invalid is True.

    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
        where {} is replaced with the derived pocket name.
    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
        main universe multiverse restricted
        where {} is replaced with the lsb_release codename (e.g. xenial)
    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
        to /etc/apt/sources.list.d/cloud-archive-list

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.

    @param fail_invalid: (boolean) if True, then the function raises a
    SourceConfigError if there is no matching installation source.

    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
    valid pocket in CLOUD_ARCHIVE_POCKETS
    """
    _mapping = OrderedDict([
        (r"^distro$", lambda: None),  # This is a NOP
        (r"^(?:proposed|distro-proposed)$", _add_proposed),
        (r"^cloud-archive:(.*)$", _add_apt_repository),
        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
        (r"^cloud:(.*)$", _add_cloud_pocket),
        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
    ])
    if source is None:
        source = ''
    for r, fn in six.iteritems(_mapping):
        m = re.match(r, source)
        if m:
            # call the associated function with the captured groups
            # raises SourceConfigError on error.
            fn(*m.groups())
            if key:
                try:
                    import_key(key)
                except GPGKeyError as e:
                    raise SourceConfigError(str(e))
            break
    else:
        # nothing matched.  log an error and maybe sys.exit
        err = "Unknown source: {!r}".format(source)
        log(err)
        if fail_invalid:
            raise SourceConfigError(err)
+
+
def _add_proposed():
    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list

    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza
    for the deb line.

    For intel architectures PROPOSED_POCKET is used for the release, but for
    other architectures PROPOSED_PORTS_POCKET is used for the release.

    :raises: SourceConfigError if the machine architecture has no proposed
        pocket mapping.
    """
    release = lsb_release()['DISTRIB_CODENAME']
    arch = platform.machine()
    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
                                .format(arch))
    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
+
+
def _add_apt_repository(spec):
    """Register *spec* via add-apt-repository, retrying transient failures.

    :param spec: the parameter to pass to add_apt_repository
    """
    _run_with_retries(['add-apt-repository', '--yes', spec])
+
+
def _add_cloud_pocket(pocket):
    """Write /etc/apt/sources.list.d/cloud-archive.list for *pocket*.

    Overwrites any existing file, translating the short pocket name to the
    real one via the CLOUD_ARCHIVE_POCKETS mapping, and makes sure the
    cloud archive keyring package is installed first.

    :param pocket: string representing the pocket to add a deb spec for.
    :raises: SourceConfigError if the cloud pocket doesn't exist or the
        requested release doesn't match the current distro version.
    """
    apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                fatal=True)
    if pocket not in CLOUD_ARCHIVE_POCKETS:
        raise SourceConfigError(
            'Unsupported cloud: source option %s' %
            pocket)
    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
        apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
+
+
def _add_cloud_staging(cloud_archive_release, openstack_release):
    """Add the ppa:ubuntu-cloud-archive/<openstack_release>-staging PPA.

    Verifies first that *cloud_archive_release* matches the codename of the
    distro this charm is being installed on.

    :param cloud_archive_release: string, codename for the release.
    :param openstack_release: String, codename for the openstack release.
    :raises: SourceConfigError if the cloud_archive_release doesn't match the
        current version of the os.
    """
    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
    staging_ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(
        openstack_release)
    _run_with_retries(['add-apt-repository', '-y', staging_ppa])
+
+
def _add_cloud_distro_check(cloud_archive_release, openstack_release):
    """Validate the distro codename, then add the full cloud pocket.

    Checks *cloud_archive_release* against the running distro and delegates
    to _add_cloud_pocket() with the combined
    '<cloud_archive_release>-<openstack_release>' spec so the correct
    cloud-archive.list is written.

    :param cloud_archive_release:String, codename for the distro release.
    :param openstack_release: String, spec for the release to look up in the
        CLOUD_ARCHIVE_POCKETS
    :raises: SourceConfigError if this is the wrong distro, or the pocket spec
        doesn't exist.
    """
    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
    pocket_spec = "{}-{}".format(cloud_archive_release, openstack_release)
    _add_cloud_pocket(pocket_spec)
+
+
+def _verify_is_ubuntu_rel(release, os_release):
+    """Verify that the release is in the same as the current ubuntu release.
+
+    :param release: String, lowercase for the release.
+    :param os_release: String, the os_release being asked for
+    :raises: SourceConfigError if the release is not the same as the ubuntu
+        release.
+    """
+    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+    if release != ubuntu_rel:
+        raise SourceConfigError(
+            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'
+            'version ({})'.format(release, os_release, ubuntu_rel))
+
+
+def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
+                      retry_message="", cmd_env=None):
+    """Run a command and retry until success or max_retries is reached.
+
+    :param: cmd: list: The apt command to run, as an argv-style list
+        (it is joined with spaces only for the log message below).
+    :param: max_retries: int: The number of retries to attempt on a fatal
+        command. Defaults to CMD_RETRY_COUNT.
+    :param: retry_exitcodes: tuple: Optional additional exit codes to retry.
+        Defaults to retry on exit code 1.
+    :param: retry_message: str: Optional log prefix emitted during retries.
+    :param: cmd_env: dict: Environment variables to add to the command run.
+    """
+
+    # Only pass env= to check_call when the caller supplied extra variables;
+    # otherwise the child inherits the current environment untouched.
+    env = None
+    kwargs = {}
+    if cmd_env:
+        env = os.environ.copy()
+        env.update(cmd_env)
+        kwargs['env'] = env
+
+    if not retry_message:
+        retry_message = "Failed executing '{}'".format(" ".join(cmd))
+    retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
+
+    retry_count = 0
+    result = None
+
+    # None means "not run yet"; keep looping while the exit code is either
+    # that sentinel or one of the retryable codes.  check_call returns 0 on
+    # success (not in retry_results), which ends the loop.
+    retry_results = (None,) + retry_exitcodes
+    while result in retry_results:
+        try:
+            result = subprocess.check_call(cmd, **kwargs)
+        except subprocess.CalledProcessError as e:
+            retry_count = retry_count + 1
+            # Re-raise the original CalledProcessError once retries are spent.
+            if retry_count > max_retries:
+                raise
+            result = e.returncode
+            log(retry_message)
+            time.sleep(CMD_RETRY_DELAY)
+
+
+def _run_apt_command(cmd, fatal=False):
+    """Run an apt command with optional retries.
+
+    :param: cmd: list: The apt command to run, as an argv-style list.
+    :param: fatal: bool: When True, retry on failure (including a held dpkg
+        lock) and raise to the caller once retries are exhausted; when False,
+        run once and ignore the exit status.
+    """
+    # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
+    cmd_env = {
+        'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
+
+    if fatal:
+        # APT_NO_LOCK is retried too: another process may hold the dpkg lock
+        # only transiently.
+        _run_with_retries(
+            cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
+            retry_message="Couldn't acquire DPKG lock")
+    else:
+        env = os.environ.copy()
+        env.update(cmd_env)
+        # subprocess.call: best-effort, exit status deliberately discarded.
+        subprocess.call(cmd, env=env)
+
+
+def get_upstream_version(package):
+    """Determine upstream version based on installed package
+
+    @returns None (if not installed) or the upstream version
+    """
+    import apt_pkg
+    cache = apt_cache()
+    try:
+        pkg = cache[package]
+    except:
+        # the package is unknown to the current apt cache.
+        return None
+
+    if not pkg.current_ver:
+        # package is known, but no version is currently installed.
+        return None
+
+    return apt_pkg.upstream_version(pkg.current_ver.ver_str)
diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py
new file mode 100644
index 0000000..d9a4d5c
--- /dev/null
+++ b/hooks/charmhelpers/osplatform.py
@@ -0,0 +1,25 @@
+import platform
+
+
+def get_platform():
+    """Return the current OS platform.
+
+    For example: if current os platform is Ubuntu then a string "ubuntu"
+    will be returned (which is the name of the module).
+    This string is used to decide which platform module should be imported.
+
+    :returns: str, either "ubuntu" or "centos".
+    :raises: RuntimeError on any other platform.
+    """
+    # linux_distribution is deprecated and will be removed in Python 3.7
+    # Warnings *not* disabled, as we certainly need to fix this.
+    tuple_platform = platform.linux_distribution()
+    current_platform = tuple_platform[0]
+    if "Ubuntu" in current_platform:
+        return "ubuntu"
+    elif "CentOS" in current_platform:
+        return "centos"
+    elif "debian" in current_platform:
+        # Stock Python does not detect Ubuntu and instead returns debian.
+        # Or at least it does in some build environments like Travis CI
+        return "ubuntu"
+    else:
+        raise RuntimeError("This module is not supported on {}."
+                           .format(current_platform))
diff --git a/hooks/templates/localhost_nagios2.cfg.tmpl b/hooks/templates/localhost_nagios2.cfg.tmpl
new file mode 100644
index 0000000..85c756f
--- /dev/null
+++ b/hooks/templates/localhost_nagios2.cfg.tmpl
@@ -0,0 +1,70 @@
+#------------------------------------------------
+# This file is juju managed
+#------------------------------------------------
+{% if monitor_self -%}
+
+# A simple configuration file for monitoring the local host
+# This can serve as an example for configuring other servers;
+# Custom services specific to this host are added here, but services
+# defined in nagios2-common_services.cfg may also apply.
+# 
+
+define host{
+        use                     generic-host            ; Name of host template to use
+        host_name               {{ nagios_hostname }}
+        alias                   {{ nagios_hostname }}
+        address                 127.0.0.1
+        icon_image_alt          Ubuntu Linux
+        statusmap_image         base/ubuntu.gd2
+        vrml_image              ubuntu.png
+        icon_image              base/ubuntu.png
+        }
+
+# Define a service to check the disk space of the root partition
+# on the local machine.  Warning if < 20% free, critical if
+# < 10% free space on partition.
+
+define service{
+        use                             generic-service         ; Name of service template to use
+        host_name                       {{ nagios_hostname }}
+        service_description             Disk Space
+        check_command                   check_all_disks!20%!10%
+        }
+
+
+
+# Define a service to check the number of currently logged in
+# users on the local machine.  Warning if > 20 users, critical
+# if > 50 users.
+
+define service{
+        use                             generic-service         ; Name of service template to use
+        host_name                       {{ nagios_hostname }}
+        service_description             Current Users
+        check_command                   check_users!20!50
+        }
+
+
+# Define a service to check the number of currently running procs
+# on the local machine.  Warning if > 250 processes, critical if
+# > 400 processes.
+
+define service{
+        use                             generic-service         ; Name of service template to use
+        host_name                       {{ nagios_hostname }}
+        service_description             Total Processes
+        check_command                   check_procs!250!400
+        }
+
+
+
+# Define a service to check the load on the local machine. 
+
+define service{
+        use                             generic-service         ; Name of service template to use
+        host_name                       {{ nagios_hostname }}
+        service_description             Current Load
+        check_command                   check_load!{{ load_monitor }}
+        }
+
+{% endif %}
diff --git a/hooks/upgrade-charm b/hooks/upgrade-charm
index 56b80c4..67fda0f 100755
--- a/hooks/upgrade-charm
+++ b/hooks/upgrade-charm
@@ -222,6 +222,14 @@ def enable_ssl():
 
 
 def update_config():
+    host_context = hookenv.config('nagios_host_context')
+    principal_unitname = hookenv.principal_unit()
+    # Fallback to using "primary" if it exists.
+    if not principal_unitname:
+        for relunit in self[self.name]:
+            if relunit.get('primary', 'False').lower() == 'true':
+                principal_unitname = relunit['__unit__']
+                break
     template_values = {'nagios_user': nagios_user,
                        'nagios_group': nagios_group,
                        'enable_livestatus': enable_livestatus,
@@ -238,7 +246,11 @@ def update_config():
                        'admin_pager': hookenv.config('admin_pager'),
                        'log_rotation_method': hookenv.config('log_rotation_method'),
                        'log_archive_path': hookenv.config('log_archive_path'),
-                       'use_syslog': hookenv.config('use_syslog')}
+                       'use_syslog': hookenv.config('use_syslog'),
+                       'monitor_self': hookenv.config('monitor_self'),
+                       'nagios_hostname': "{}-{}".format(host_context,principal_unitname).replace('/', '-'),
+                       'load_monitor': hookenv.config('load_monitor'),
+                       }
 
     with open('hooks/templates/nagios-cfg.tmpl', 'r') as f:
         templateDef = f.read()
@@ -247,8 +259,15 @@ def update_config():
     with open(nagios_cfg, 'w') as f:
         f.write(t.render(template_values))
 
+    with open('hooks/templates/localhost_nagios2.cfg.tmpl', 'r') as f:
+        templateDef = f.read()
+    t = Template(templateDef)
+    with open('/etc/nagios3/conf.d/localhost_nagios2.cfg', 'w') as f:
+        f.write(t.render(template_values))
+
     host.service_reload('nagios3')
 
+
 # Nagios3 is deployed as a global apache application from the archive.
 # We'll get a little funky and add the SSL keys to the default-ssl config
 # which sets our keys, including the self-signed ones, as the host keyfiles.