← Back to team overview

nagios-charmers team mailing list archive

[Merge] ~aieri/hw-health-charm:oo-rewrite-rebased into hw-health-charm:master

 

Andrea Ieri has proposed merging ~aieri/hw-health-charm:oo-rewrite-rebased into hw-health-charm:master.

Requested reviews:
  Alvaro Uría (aluria)
  Canonical IS Reviewers (canonical-is-reviewers)

For more details, see:
https://code.launchpad.net/~aieri/hw-health-charm/+git/hw-health-charm/+merge/364694

This is a more or less complete rewrite of tool installation in a more object-oriented way.
I have tried as best as I could to separate the work into more digestible commits, so reviewing this via the CLI one commit at a time is advised, although it is still a huge changeset.
-- 
Your team Nagios Charm developers is subscribed to branch hw-health-charm:master.
diff --git a/.gitignore b/.gitignore
index 3fbd340..06bf3e8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,4 +18,8 @@ __pycache__/
 # version data
 src/repo-info
 
+# build artifacts
+builds
+
+# tools bundle (needed for functional testing)
 tools.zip
diff --git a/Makefile b/Makefile
index 20a6f9b..a5668bc 100644
--- a/Makefile
+++ b/Makefile
@@ -9,26 +9,29 @@ help:
 	@echo " make help - show this text"
 	@echo " make lint - use pre-commit to ensure consistent layout"
 	@echo " make test - run the functional test, unittests and lint"
-	@echo " make unittest - run the tests defined in the unittest "\
-		"subdirectory"
-	@echo " make functionaltest - run the tests defined in the "\
-		"functional subdirectory"
+	@echo " make unittest - run the tests defined in the unittest subdirectory"
+	@echo " make functional - run the tests defined in the functional subdirectory"
 	@echo " make release - build the charm"
 	@echo " make clean - remove unneeded files"
 	@echo ""
-	@echo " the following targets are meant to be used by the Makefile"
-	@echo " make requirements - installs the requirements"
 
 lint:
 	@echo "Running flake8"
 	cd src && tox -e pep8
 
 
-test: unittest lint
+test: unittest functional lint
 
 unittest:
 	@cd src && tox -e unit
 
+functional: build
+ifeq ("$(wildcard $(JUJU_REPOSITORY)/tools.zip)","")
+	$(error ERROR: You must save the tools resource in ${JUJU_REPOSITORY}/tools.zip before running functional tests)
+else
+	@cd src && tox -e functional
+endif
+
 build:
 	@echo "Building charm to base directory $(JUJU_REPOSITORY)"
 	@git show -s --format="commit-sha-1: %H%ncommit-short: %h" > ./src/repo-info
diff --git a/src/config.yaml b/src/config.yaml
index 1a5bcfd..a6e8797 100644
--- a/src/config.yaml
+++ b/src/config.yaml
@@ -13,7 +13,17 @@ options:
     description: |
       Choose the tools to get deployed (hp, dell, supermicro, huawei) or leave
       the charm to self discover the tools needed to run hardware health checks.
+      The special value "test" is only useful during testing to bypass
+      installation restrictions and override hardware detection.
+      Only "auto" and "test" are currently implemented.
   enable_ipmi:
     type: boolean
     default: True
-    description: Enable the use of freeipmi tools to monitor hardware status.
\ No newline at end of file
+    description: Enable the use of freeipmi tools to monitor hardware status.
+  ipmi_check_options:
+    type: string
+    default: ''
+    description: |
+      Additional options to be passed to check_ipmi_sensor. For non-standard
+      ipmi implementations you might for example need
+      "--seloptions '--assume-system-event-records'"
diff --git a/src/files/ipmi/check_ipmi_sensor_sudoer b/src/files/ipmi/99-check_ipmi_sensor.sudoer
similarity index 100%
rename from src/files/ipmi/check_ipmi_sensor_sudoer
rename to src/files/ipmi/99-check_ipmi_sensor.sudoer
diff --git a/src/files/megaraid/check_megacli.py b/src/files/megacli/check_megacli.py
similarity index 100%
rename from src/files/megaraid/check_megacli.py
rename to src/files/megacli/check_megacli.py
diff --git a/src/files/megaraid/check_megacli.sh b/src/files/megacli/check_megacli.sh
similarity index 100%
rename from src/files/megaraid/check_megacli.sh
rename to src/files/megacli/check_megacli.sh
diff --git a/src/layer.yaml b/src/layer.yaml
index 054b43e..893978e 100644
--- a/src/layer.yaml
+++ b/src/layer.yaml
@@ -4,6 +4,7 @@ includes:
 - "layer:status"
 - "layer:nagios"
 - "layer:snap"
+- "interface:juju-info"
 - "interface:nrpe-external-master"
 options:
   status:
diff --git a/src/lib/utils/__init__.py b/src/lib/hwhealth/__init__.py
similarity index 100%
rename from src/lib/utils/__init__.py
rename to src/lib/hwhealth/__init__.py
diff --git a/src/lib/hwhealth/classes.py b/src/lib/hwhealth/classes.py
new file mode 100644
index 0000000..57226cb
--- /dev/null
+++ b/src/lib/hwhealth/classes.py
@@ -0,0 +1,298 @@
+import os
+import shutil
+import subprocess
+from zipfile import ZipFile, BadZipFile
+from tempfile import TemporaryDirectory
+from charmhelpers import fetch
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import status_set
+from charmhelpers.contrib.charmsupport import nrpe
+
+
+class Tool():
+    '''
+    This is an abstract class representing a "tool".
+    The idea is to delegate install/configure duties to specific per-tool
+    classes instead of handling multiple exceptional cases in a main loop
+    during the install hook.
+    Every tool should implement its own internal logic regarding how to be
+    installed, configured, and removed.
+    '''
+    NRPE_PLUGINS_DIR = '/usr/local/lib/nagios/plugins'
+    NRPE_PLUGINS_MODE = 0o100755
+    NRPE_PLUGINS_UID = 0
+    NRPE_PLUGINS_GID = 0
+    TOOLS_DIR = '/usr/local/bin'
+    TOOLS_MODE = 0o100755
+    TOOLS_UID = 0
+    TOOLS_GID = 0
+    CROND_DIR = '/etc/cron.d'
+    CRONJOB_SCRIPT_MODE = 0o100755
+    CRONJOB_SCRIPT_UID = 0
+    CRONJOB_SCRIPT_GID = 0
+    SUDOERS_DIR = '/etc/sudoers.d'
+    SUDOERS_MODE = 0o100440
+    SUDOERS_UID = 0
+    SUDOERS_GID = 0
+
+    def __init__(self, shortname, nrpe_opts=''):
+        self._nagios_hostname = nrpe.get_nagios_hostname()
+        self._nrpe_opts = nrpe_opts
+        self._shortname = shortname
+        self._files_dir = os.path.join(hookenv.charm_dir(),
+                                       'files',
+                                       self._shortname)
+        self._nrpe_script = 'check_{}'.format(shortname)
+
+    def _install_nrpe_plugin(self):
+        src = os.path.join(self._files_dir, self._nrpe_script)
+        dst = shutil.copy(src, self.NRPE_PLUGINS_DIR)
+        os.chmod(dst, self.NRPE_PLUGINS_MODE)
+        os.chown(dst, uid=self.NRPE_PLUGINS_UID, gid=self.NRPE_PLUGINS_GID)
+        hookenv.log('NRPE script for tool [{}] installed at {}'
+                    ''.format(self._shortname, dst),
+                    hookenv.DEBUG)
+        return dst
+
+    def _remove_nrpe_plugin(self):
+        plugin_path = os.path.join(self.NRPE_PLUGINS_DIR, self._nrpe_script)
+        os.remove(plugin_path)
+        hookenv.log('deleted NRPE script for tool [{}]'
+                    ''.format(self._shortname),
+                    hookenv.DEBUG)
+
+    def configure_nrpe_check(self):
+        nrpe_setup = nrpe.NRPE(hostname=self._nagios_hostname, primary=False)
+        cmd = ' '.join([self._nrpe_script, self._nrpe_opts])
+        nrpe_setup.add_check(
+            shortname=self._shortname,
+            description='{} Hardware Health'.format(self._shortname),
+            check_cmd=cmd
+        )
+        nrpe_setup.write()
+        hookenv.log('configured NRPE check for tool [{}]'
+                    ''.format(self._shortname),
+                    hookenv.DEBUG)
+
+    def remove_nrpe_check(self):
+        nrpe_setup = nrpe.NRPE(hostname=self._nagios_hostname, primary=False)
+        cmd = ' '.join([self._nrpe_script, self._nrpe_opts])
+        nrpe_setup.remove_check(
+            shortname=self._shortname,
+            check_cmd=cmd
+        )
+        nrpe_setup.write()
+        hookenv.log('removed NRPE check for tool [{}]'
+                    ''.format(self._shortname),
+                    hookenv.DEBUG)
+
+    def install(self):
+        self._install_nrpe_plugin()
+        hookenv.log('Installed tool [{}]'.format(self._shortname))
+
+    def remove(self):
+        self.remove_nrpe_check()
+        self._remove_nrpe_plugin()
+        hookenv.log('Removed tool [{}]'.format(self._shortname))
+
+
+class VendorTool(Tool):
+    '''
+    A Vendor tool is a statically linked binary we ship via a resource.
+    Since it generally requires root privileges to run, we also deploy a
+    cronjob that runs as root and saves the tool output in a temporary file
+    that nrpe can read (as nagios user).
+    '''
+    def __init__(self, shortname):
+        super().__init__(shortname=shortname)
+        self._cronjob_script = 'check_{}.sh'.format(shortname)
+        self._nrpe_script = 'check_{}.py'.format(shortname)
+
+    def install(self):
+        self._install_from_resource()
+        # TODO(aluria): install vendor utilities via snaps
+        # https://forum.snapcraft.io/t/request-for-classic-confinement-sas2ircu/9023
+        # Snap interface for hw tooling is not supported
+        # in strict mode (confined). Classic mode
+        # (unconfined snaps) need approval
+        # Install the cronjob that generates the temp output
+        self._install_cronjob()
+        super().install()
+
+    def remove(self):
+        self._remove_cronjob()
+        self._remove_binary()
+        super().remove()
+
+    def _install_from_resource(self):
+        resource = hookenv.resource_get('tools')
+        if resource:
+            hookenv.log('Installing tool [{}] from resource'
+                        ''.format(self._shortname),
+                        hookenv.DEBUG)
+            status_set('waiting', 'Installing from attached resource')
+            # Move in from a temp directory to be atomic
+            with TemporaryDirectory() as tmpdir:
+                try:
+                    with ZipFile(resource, 'r') as zipfile:
+                        tmpfile = zipfile.extract(self._shortname, tmpdir)
+                        # We could just use self.TOOLS_DIR as a destination
+                        # here, but shutil.move refuses to overwrite the
+                        # destination file unless it receives a full path
+                        dst = shutil.move(tmpfile,
+                                          os.path.join(self.TOOLS_DIR,
+                                                       self._shortname))
+                        os.chmod(dst, self.TOOLS_MODE)
+                        os.chown(dst, uid=self.TOOLS_UID, gid=self.TOOLS_GID)
+
+                except BadZipFile as error:
+                    hookenv.log('BadZipFile: {}'.format(error), hookenv.ERROR)
+
+                except PermissionError as error:
+                    hookenv.log('Unable to unzip tool {} from the provided '
+                                'resource: {}'.format(self._shortname, error),
+                                hookenv.ERROR)
+                except KeyError as error:
+                    hookenv.log('Tool {} not found in the provided resource: '
+                                '{}'.format(self._shortname, error),
+                                hookenv.ERROR)
+                    status_set('blocked',
+                               'Tool {} not found'.format(self._shortname))
+
+        else:
+            hookenv.log(
+                'Missing Juju resource: tools - alternative method is not '
+                'available yet', hookenv.ERROR)
+            status_set('blocked', 'Missing Juju resource: tools')
+
+    def _remove_binary(self):
+        binary_path = os.path.join(self.TOOLS_DIR, self._shortname)
+        os.remove(binary_path)
+        hookenv.log('Removed binary tool {}'.format(binary_path),
+                    hookenv.DEBUG)
+
+    def _install_cronjob(self):
+        # Copy the cronjob script to the nagios plugins directory
+        src = os.path.join(self._files_dir, self._cronjob_script)
+        dst = shutil.copy(src, self.NRPE_PLUGINS_DIR)
+        os.chmod(dst, self.CRONJOB_SCRIPT_MODE)
+        os.chown(dst,
+                 uid=self.CRONJOB_SCRIPT_UID,
+                 gid=self.CRONJOB_SCRIPT_GID)
+        hookenv.log('Cronjob script [{}] copied to {}'
+                    ''.format(self._cronjob_script, self.NRPE_PLUGINS_DIR),
+                    hookenv.DEBUG)
+        # Run it once to generate the temp file, otherwise the nrpe check might
+        # fail at first
+        subprocess.call(dst)
+        # Now generate the cronjob file
+        cronjob_line = '*/5 * * * * root {}\n'.format(dst)
+        crond_file = os.path.join(self.CROND_DIR, self._shortname)
+        with open(crond_file, 'w') as crond_fd:
+            crond_fd.write(cronjob_line)
+            hookenv.log('Cronjob configured at {}'.format(crond_file),
+                        hookenv.DEBUG)
+
+        return dst
+
+    def _remove_cronjob(self):
+        crond_file = os.path.join(self.CROND_DIR, self._shortname)
+        cronjob_script = os.path.join(self.NRPE_PLUGINS_DIR,
+                                      self._cronjob_script)
+        os.remove(crond_file)
+        os.remove(cronjob_script)
+        hookenv.log('Removed cronjob files [{}, {}]'
+                    ''.format(crond_file, cronjob_script),
+                    hookenv.DEBUG)
+
+
+class Sas3IrcuTool(VendorTool):
+    def __init__(self):
+        super().__init__(shortname='sas3ircu')
+
+
+class Sas2IrcuTool(VendorTool):
+    def __init__(self):
+        super().__init__(shortname='sas2ircu')
+
+
+class MegaCLITool(VendorTool):
+    def __init__(self):
+        super().__init__(shortname='megacli')
+
+
+class MdadmTool(VendorTool):
+    '''
+    Our mdadm check kind of behaves like a VendorTool for the purpose of
+    installation as it has a cronjob + check script
+    '''
+    def __init__(self):
+        shortname = 'mdadm'
+        super().__init__(shortname=shortname)
+        self._cronjob_script = 'cron_{}.py'.format(shortname)
+        self._nrpe_script = 'check_{}.py'.format(shortname)
+
+    def install(self):
+        # mdadm should already be installed, but let's check
+        fetch.apt_install(['mdadm'], fatal=True)
+        self._install_cronjob()
+        # No vendor binary to install
+        Tool.install(self)
+
+    def remove(self):
+        self._remove_cronjob()
+        # There's no vendor binary to remove
+        Tool.remove(self)
+
+
+class IpmiTool(Tool):
+    '''
+    The IPMI check does not rely on a vendor binary, it only uses freeipmi
+    (available via the main repos) and a nrpe check that is imported as a git
+    submodule
+    '''
+    def __init__(self, nrpe_opts=''):
+        super().__init__(shortname='ipmi',
+                         nrpe_opts=hookenv.config('ipmi_check_options'))
+        self._nrpe_script = 'check_ipmi_sensor'
+        self._sudoer_file = '99-check_ipmi_sensor.sudoer'
+
+    def install(self):
+        # Install the sudoer file
+        self._install_sudoer()
+        super().install()
+
+    def remove(self):
+        self._remove_sudoer()
+        super().remove()
+
+    def _install_sudoer(self):
+        '''
+        User nagios is normally not able to run ipmi-sel and ipmi-sensors, so
+        we need to add a sudoer entry for it
+        '''
+        src = os.path.join(self._files_dir, self._sudoer_file)
+        dst = shutil.copy(src, self.SUDOERS_DIR)
+        os.chmod(dst, self.SUDOERS_MODE)
+        os.chown(dst, uid=self.SUDOERS_UID, gid=self.SUDOERS_GID)
+        hookenv.log('sudoer file for tool [{}] installed at {}'
+                    ''.format(self._shortname, dst),
+                    hookenv.DEBUG)
+        return dst
+
+    def _remove_sudoer(self):
+        sudoer_path = os.path.join(self.SUDOERS_DIR, self._sudoer_file)
+        os.remove(sudoer_path)
+        hookenv.log('deleted sudoer file {}'.format(sudoer_path), hookenv.DEBUG)
+
+
+class HPTool(VendorTool):
+    # ['hponcfg', 'hp-health', 'hpssacli']
+    def __init__(self):
+        raise NotImplementedError
+
+
+class HPETool(VendorTool):
+    # ['hponcfg', 'ilorest']
+    def __init__(self):
+        raise NotImplementedError
diff --git a/src/lib/utils/hwdiscovery.py b/src/lib/hwhealth/hwdiscovery.py
similarity index 79%
rename from src/lib/utils/hwdiscovery.py
rename to src/lib/hwhealth/hwdiscovery.py
index 480fea2..ba949ef 100644
--- a/src/lib/utils/hwdiscovery.py
+++ b/src/lib/hwhealth/hwdiscovery.py
@@ -4,20 +4,38 @@ import os
 import re
 import subprocess
 
-# import sys
+from hwhealth import classes
 from charmhelpers.core import host, hookenv
 
 
-def get_tools():
+def get_tools(manufacturer='auto'):
+    if manufacturer == 'test':
+        # Return all possible tools to aid testing
+        return [
+            classes.MegaCLITool(),
+            classes.Sas2IrcuTool(),
+            classes.Sas3IrcuTool(),
+            # classes.HPETool(), # NotImplementedYet
+            # classes.HPTool(),  # NotImplementedYet
+            classes.MdadmTool(),
+            classes.IpmiTool(),
+        ]
+    elif manufacturer == 'auto':
+        return _get_tools()
+    else:
+        raise NotImplementedError
+
+
+def _get_tools():
     tools = set()
 
     # MegaRAID SAS
     if len(glob.glob('/sys/bus/pci/drivers/megaraid_sas/00*')) > 0:
-        tools.add('megacli')
+        tools.add(classes.MegaCLITool())
 
     # Huawei LSI SAS card
     if len(glob.glob('/sys/bus/pci/drivers/mpt2sas/00*')) > 0:
-        tools.add('sas2ircu')
+        tools.add(classes.Sas2IrcuTool())
 
     # New Huawei drivers, but also Supermicro LSI SAS cards
     pci_devs = glob.glob('/sys/bus/pci/drivers/mpt3sas/00*')
@@ -28,24 +46,24 @@ def get_tools():
             with open(board) as fd:
                 board_name = fd.readline().strip()
                 if board_name == 'LSISAS2308':
-                    tools.add('sas2ircu')
+                    tools.add(classes.Sas2IrcuTool())
                 else:
                     # LSI3008-IR
-                    tools.add('sas3ircu')
+                    tools.add(classes.Sas3IrcuTool())
 
     # HPE (Gen10 and newer) or HP (older)
     hw_vendor = dmidecode('system-manufacturer')
     if hw_vendor == 'HPE':
-        tools.add('hpe')  # ['hponcfg', 'ilorest']
+        tools.add(classes.HPETool())
     elif hw_vendor == 'HP':
-        tools.add('hp')  # ['hponcfg', 'hp-health', 'hpssacli']
+        tools.add(classes.HPTool())
 
     # SW RAID?
     if _supports_mdadm():
-        tools.add('mdadm')
+        tools.add(classes.MdadmTool())
 
     if hookenv.config('enable_ipmi'):
-        tools.add('ipmi')
+        tools.add(classes.IpmiTool())
 
     return tools
 
diff --git a/src/lib/utils/tooling.py b/src/lib/utils/tooling.py
deleted file mode 100644
index 40b140c..0000000
--- a/src/lib/utils/tooling.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# -*- coding: us-ascii -*-
-import glob
-import os
-import shutil
-import subprocess
-import tempfile
-import zipfile
-
-from charmhelpers.contrib.charmsupport import nrpe
-from charmhelpers.core import hookenv
-
-TOOLING = {
-    'megacli': {
-        'snap': 'megacli',
-        'filename': 'megaraid/check_megacli.py',
-        'cronjob': 'megaraid/check_megacli.sh'
-    },
-    'sas2ircu': {
-        'snap': 'sas2ircu',
-        'filename': 'sas2ircu/check_sas2ircu.py',
-        'cronjob': 'sas2ircu/check_sas2ircu.sh'
-    },
-    'sas3ircu': {
-        'snap': 'sas3ircu',
-        'filename': 'sas3ircu/check_sas3ircu.py',
-        'cronjob': 'sas3ircu/check_sas3ircu.sh'
-    },
-    'mdadm': {
-        'filename': 'mdadm/check_mdadm.py',
-        'cronjob': 'mdadm/cron_mdadm.py'
-    },
-    'ipmi': {
-        'filename': 'ipmi/check_ipmi_sensor',
-        'sudoers': 'ipmi/check_ipmi_sensor_sudoer'
-    },
-}
-
-PLUGINS_DIR = '/usr/local/lib/nagios/plugins/'
-TOOLS_DIR = '/usr/local/bin/'
-CRONJOB_PATH = '/etc/cron.d/hw_health_{}'
-SUDOERS_DIR = '/etc/sudoers.d'
-
-
-def install_resource(tools, resource):
-    """Install hardware diagnostic tools from a charm resource
-
-    :param tools: set of tools to include
-    :param resource: resource object
-    :return: boolean, status of installation
-    """
-    if not isinstance(tools, set):
-        try:
-            tools = set(tools)
-        except TypeError as e:
-            hookenv.log("install_resources called with invalid 'tools', {}".format(e))
-            return False
-    if not tools:
-        return False
-    elif tools & {'mdadm', 'ipmi'}:
-        tools = tools - {'mdadm', 'ipmi'}
-        if not tools:
-            return True
-
-    if not isinstance(resource, str) \
-            or not resource.endswith('.zip') \
-            or not os.path.exists(resource):
-        hookenv.log("The resource 'tools' does not end with .zip or does not exist")
-        return False
-
-    with tempfile.TemporaryDirectory() as tmpdir:
-        try:
-            with zipfile.ZipFile(resource, 'r') as zf:
-                zf.extractall(tmpdir)
-
-            count = 0
-            for filepath in glob.glob(os.path.join(tmpdir, '*')):
-                filebasename = os.path.basename(filepath)
-                if filebasename in tools \
-                        and filebasename in TOOLING \
-                        and os.path.isfile(filepath):
-                    os.chmod(filepath, 0o755)
-                    shutil.chown(filepath, user=0, group=0)
-                    shutil.copy2(filepath, TOOLS_DIR)
-                    count += 1
-
-            return len(tools) == count and count > 0
-
-        except zipfile.BadZipFile as error:
-            hookenv.log('BadZipFile: {}'.format(error), hookenv.ERROR)
-
-        except PermissionError as error:
-            hookenv.log(
-                'Unable to unzip the resource: {}'.format(error), hookenv.ERROR)
-
-    return False
-
-
-def install_tools(tools, method='snap'):
-    """Install hardware management tools from a snap
-
-    :param tools: list of tools to install
-    :param method: default to snap if unspecified, other means have yet to be added
-    :return:
-    """
-    count = 0
-    for tool in tools:
-        if tool in TOOLING and method in TOOLING[tool]:
-            if method == 'snap':
-                try:
-                    # snap.install(TOOLING[tool][method])
-                    count += 1
-                except Exception as error:
-                    hookenv.log(
-                        'Snap install error: {}'.format(error), hookenv.ERROR)
-        elif tool == 'mdadm':
-            # Note(aluria): mdadm is assumed to be installed by default
-            count += 1
-
-    return len(tools) >= count > 0
-
-
-def _get_filepath(filename):
-    filepath = os.path.join(hookenv.charm_dir(), 'files', filename)
-    if os.path.exists(filepath):
-        return filepath
-
-
-def configure_tools(tools):
-    count = 0
-    for tool in tools:
-        if tool in TOOLING:
-            if 'cronjob' in TOOLING[tool]:
-                filepath = _get_filepath(TOOLING[tool]['cronjob'])
-                cronjob_scriptname = os.path.basename(TOOLING[tool]['cronjob'])
-                if filepath:
-                    subprocess.call(filepath)
-                    shutil.copy2(filepath, PLUGINS_DIR)
-                    hookenv.log(
-                        'Cronjob script [{}] copied to {}'.format(
-                            filepath, PLUGINS_DIR), hookenv.DEBUG)
-                else:
-                    continue
-                cronjob_file = CRONJOB_PATH.format(tool)
-                cronjob_line = '*/5 * * * * root {}\n'
-                cronjob_script = os.path.join(PLUGINS_DIR, cronjob_scriptname)
-                with open(cronjob_file, 'w') as fd:
-                    fd.write(cronjob_line.format(cronjob_script))
-                    hookenv.log(
-                        'Cronjob configured at {}'.format(cronjob_file),
-                        hookenv.DEBUG)
-                count += 1
-                hookenv.log('Cronjob for tool [{}] configured'.format(tool))
-
-            if 'filename' in TOOLING[tool]:
-                filepath = _get_filepath(TOOLING[tool]['filename'])
-                if filepath:
-                    shutil.copy2(filepath, PLUGINS_DIR)
-                    count += 1
-                    hookenv.log(
-                        'NRPE script for tool [{}] configured'.format(tool))
-            if 'sudoers' in TOOLING[tool]:
-                sudoers_path = _get_filepath(TOOLING[tool]['sudoers'])
-                if sudoers_path:
-                    shutil.copy2(sudoers_path, SUDOERS_DIR)
-                    count += 1
-                    hookenv.log('sudoers entry for tool [{}] added'.format(tool))
-    return count > 0
-
-
-def configure_cronjobs(tools):
-    pass
-
-
-def configure_nrpe_checks(tools):
-    if not configure_tools(tools):
-        hookenv.log(
-            "No tools configured. NRPE checks can't be configured",
-            hookenv.DEBUG)
-        return False
-
-    count = 0
-    hostname = nrpe.get_nagios_hostname()
-    nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False)
-    for tool in tools:
-        if tool in TOOLING and 'filename' in TOOLING[tool]:
-            scriptname = os.path.basename(TOOLING[tool]['filename'])
-            cmd = os.path.join(PLUGINS_DIR, scriptname)  # TODO args to the command?
-            if os.path.exists(cmd):
-                nrpe_setup.add_check(tool, '{} Hardware Health', cmd)
-                nrpe_setup.write()
-                count += 1
-        else:
-            cronjob_file = os.path.join('/etc/cron.d', tool)
-            if os.path.exists(cronjob_file):
-                os.unlink(cronjob_file)
-                hookenv.log(
-                    'Cronjob file [{}] removed'.format(cronjob_file),
-                    hookenv.DEBUG)
-
-    return count > 0
-
-
-def remove_nrpe_checks(tools):
-    if len(tools) == 0:
-        return False
-
-    hostname = nrpe.get_nagios_hostname()
-    nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False)
-    count = 0
-    for tool in tools:
-        if tool not in TOOLING:
-            continue
-        nrpe_setup.remove_check(
-            shortname=tool, check_cmd='check_{}'.format(tool))
-
-        if os.path.exists(CRONJOB_PATH.format(tool)):
-            os.remove(CRONJOB_PATH.format(tool))
-
-        hookenv.log('Check removed from nrpe: {}'.format(tool))
-        count += 1
-
-    if count > 0:
-        nrpe_setup.write()
-        return True
-    return False
diff --git a/src/reactive/hw_health.py b/src/reactive/hw_health.py
index d1a0fc6..941b33a 100644
--- a/src/reactive/hw_health.py
+++ b/src/reactive/hw_health.py
@@ -2,99 +2,96 @@ import os
 
 from charmhelpers.core import hookenv, host, unitdata
 from charms.layer import status
-from charms.reactive import (
-    clear_flag,
+from charms.reactive.flags import (
     set_flag,
+    clear_flag,
+)
+from charms.reactive import (
+    hook,
     when,
     when_none,
     when_not,
 )
-from utils.hwdiscovery import get_tools
-from utils.tooling import (
-    configure_nrpe_checks,
-    install_resource,
-    remove_nrpe_checks,
-)
+from hwhealth.hwdiscovery import get_tools
+from hwhealth import classes
 
 
 @when_none('hw-health.installed', 'hw-health.unsupported')
+@when('general-info.connected')
 def install():
-    if host.is_container():
+    manufacturer = hookenv.config('manufacturer')
+    if host.is_container() and manufacturer != 'test':
         status.blocked('Containers are not supported')
         set_flag('hw-health.unsupported')
         return
 
-    config = hookenv.config()
-    if config.get('manufacturer', '') != 'auto':
+    if manufacturer not in ['auto', 'test']:
         status.blocked('manufacturer needs to be set to auto')
         return
 
+    # Detect hardware and return a list of tools we need to use
     status.maintenance('Autodiscovering hardware')
-    tools = get_tools()
+    tools = get_tools(manufacturer)
     if not tools:
-        set_flag('hw-health.unsupported')
         status.blocked('Hardware not supported')
+        set_flag('hw-health.unsupported')
     else:
-        resource = hookenv.resource_get('tools')
-        if resource:
-            hookenv.log('Installing from resource')
-            status.waiting('Installing from attached resource')
-            installed = install_resource(tools, resource)
-        else:
-            hookenv.log(
-                'Missing Juju resource: tools - alternative method is not'
-                ' available yet', hookenv.ERROR)
-            status.blocked('Missing Juju resource: tools')
-            return
-            # TODO(aluria): install vendor utilities via snaps
-            # https://forum.snapcraft.io/t/request-for-classic-confinement-sas2ircu/9023
-            # Snap interface for hw tooling is not supported
-            # in strict mode (confined). Classic mode
-            # (unconfined snaps) need approval
-            #
-            # hookenv.log('Installing from snap')
-            # status.waiting('Installing from snap')
-            # installed = install_tools(tools, method='snap')
-
-        if installed:
-            kv = unitdata.kv()
-            kv.set('tools', list(tools))
-            set_flag('hw-health.installed')
-            status.waiting('Preparing scripts installation')
-        else:
-            missing_tools = tools - {'mdadm', 'ipmi'}
-            status.blocked('Tools not found: {}'.format(', '.join(missing_tools)))
+        tool_list = list()
+        for tool in tools:
+            tool.install()
+            # Save the class name in the unit kv db. This will be reused when
+            # reconfiguring or removing the checks
+            tool_list.append(type(tool).__name__)
+        unitdb = unitdata.kv()
+        unitdb.set('tools', tool_list)
+        set_flag('hw-health.installed')
+
+
+@hook('upgrade-charm')
+def upgrade():
+    clear_flag('hw-health.installed')
+    clear_flag('hw-health.unsupported')
+    clear_flag('hw-health.configured')
+    status.maintenance('Charm upgrade in progress')
 
 
 @when('hw-health.installed')
-@when('nrpe-external-master.available')
-def remove_checks(nrpe):
-    if hookenv.hook_name() == 'stop':
-        # Note(aluria): Remove nrpe check(s)
-        kv = unitdata.kv()
-        tools = kv.get('tools', set())
-        removed = remove_nrpe_checks(tools)
-        if removed:
-            nrpe.updated()
+@when_not('general-info.available')
+def remove_tools():
+    unitdb = unitdata.kv()
+    for tool_class_name in unitdb.get('tools', set()):
+        # Re-instantiate the tool from the saved class name
+        tool_class = getattr(classes, tool_class_name)
+        tool_class().remove()
+    clear_flag('hw-health.installed')
+    clear_flag('hw-health.unsupported')
+    clear_flag('hw-health.configured')
 
 
 @when('config.changed')
+@when_not('config.changed.manufacturer')
 def config_changed():
-    # clear_flag('hw-health.configured')
-    clear_flag('hw-health.installed')
-    clear_flag('hw-health.unsupported')
-    clear_flag('hw-health.nrpe')
+    clear_flag('hw-health.configured')
+
+
+@when('config.changed.manufacturer')
+def toolset_changed():
+    # Changing the manufacturer option will trigger a reinstallation of the
+    # tools
+    remove_tools()
+    status.maintenance('Reinstallation of tools in progress')
 
 
 @when('hw-health.installed')
 @when_not('nrpe-external-master.available')
+@when_not('hw-health.configured')
 def blocked_on_nrpe():
     status.blocked('Missing relations: nrpe-external-master')
 
 
 @when('hw-health.installed')
 @when('nrpe-external-master.available')
-@when_not('hw-health.nrpe')
+@when_not('hw-health.configured')
 def configure_nrpe():
     if not os.path.exists('/var/lib/nagios'):
         status.waiting('Waiting for nrpe package installation')
@@ -102,12 +99,23 @@ def configure_nrpe():
 
     status.maintenance('Configuring nrpe checks')
 
-    kv = unitdata.kv()
-    tools = kv.get('tools', set())
+    unitdb = unitdata.kv()
+    for tool_class_name in unitdb.get('tools', set()):
+        # Re-instantiate the tool from the saved class name
+        tool_class = getattr(classes, tool_class_name)
+        tool_class().configure_nrpe_check()
+    status.active('ready')
+    set_flag('hw-health.configured')
 
-    configured = configure_nrpe_checks(tools)
-    if configured:
-        status.active('ready')
-        set_flag('hw-health.nrpe')
-    else:
-        status.waiting('Missing tools to monitor hw health')
+
+@when('hw-health.installed')
+@when_not('nrpe-external-master.available')
+@when('hw-health.configured')
+def remove_nrpe_checks():
+    status.maintenance('Removing nrpe checks')
+    unitdb = unitdata.kv()
+    for tool_class_name in unitdb.get('tools', set()):
+        # Re-instantiate the tool from the saved class name
+        tool_class = getattr(classes, tool_class_name)
+        tool_class().remove_nrpe_check()
+    clear_flag('hw-health.configured')
diff --git a/src/tests/download_nagios_plugin3.py b/src/tests/download_nagios_plugin3.py
index 0505ec8..173fa31 100755
--- a/src/tests/download_nagios_plugin3.py
+++ b/src/tests/download_nagios_plugin3.py
@@ -18,7 +18,7 @@ def content():
 
 
 def main():
-    for i in glob('.tox/py3/lib/python3*'):
+    for i in glob('.tox/unit/lib/python3*'):
         mod_path = os.path.join(i, MODULE_NAME)
         if os.path.isdir(i) and not os.path.exists(mod_path):
             open(mod_path, 'wb').write(content())
diff --git a/src/tests/functional/conftest.py b/src/tests/functional/conftest.py
new file mode 100644
index 0000000..cbc7d9f
--- /dev/null
+++ b/src/tests/functional/conftest.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python3
+'''
+Reusable pytest fixtures for functional testing
+
+Environment variables
+---------------------
+
+test_preserve_model:
+if set, the testing model won't be torn down at the end of the testing session
+'''
+
+import asyncio
+import json
+import os
+import uuid
+import pytest
+import juju
+from juju.controller import Controller
+from juju.errors import JujuError
+
+STAT_CMD = '''python3 - <<EOF
+import json
+import os
+
+s = os.stat('%s')
+stat_hash = {
+    'uid': s.st_uid,
+    'gid': s.st_gid,
+    'mode': oct(s.st_mode),
+    'size': s.st_size
+}
+stat_json = json.dumps(stat_hash)
+print(stat_json)
+
+EOF
+'''
+
+
+@pytest.yield_fixture(scope='module')
+def event_loop():
+    '''Override the default pytest event loop to allow for fixtures using a
+    broader scope'''
+    loop = asyncio.get_event_loop_policy().new_event_loop()
+    asyncio.set_event_loop(loop)
+    loop.set_debug(True)
+    yield loop
+    loop.close()
+    asyncio.set_event_loop(None)
+
+
+@pytest.fixture(scope='module')
+async def controller():
+    '''Connect to the current controller'''
+    _controller = Controller()
+    await _controller.connect_current()
+    yield _controller
+    await _controller.disconnect()
+
+
+@pytest.fixture(scope='module')
+async def model(controller):  # pylint: disable=redefined-outer-name
+    '''This model lives only for the duration of the test'''
+    model_name = "functest-{}".format(uuid.uuid4())
+    _model = await controller.add_model(model_name)
+    yield _model
+    await _model.disconnect()
+    if not os.getenv('test_preserve_model'):
+        await controller.destroy_model(model_name)
+        while model_name in await controller.list_models():
+            await asyncio.sleep(1)
+
+
+@pytest.fixture()
+async def get_app(model):  # pylint: disable=redefined-outer-name
+    '''Returns the application requested'''
+    async def _get_app(name):
+        try:
+            return model.applications[name]
+        except KeyError:
+            raise JujuError("Cannot find application {}".format(name))
+    return _get_app
+
+
+@pytest.fixture()
+async def get_unit(model):  # pylint: disable=redefined-outer-name
+    '''Returns the requested <app_name>/<unit_number> unit'''
+    async def _get_unit(name):
+        try:
+            (app_name, unit_number) = name.split('/')
+            return model.applications[app_name].units[unit_number]
+        except (KeyError, ValueError):
+            raise JujuError("Cannot find unit {}".format(name))
+    return _get_unit
+
+
+@pytest.fixture()
+async def get_entity(get_unit, get_app):  # pylint: disable=redefined-outer-name
+    '''Returns a unit or an application'''
+    async def _get_entity(name):
+        try:
+            return await get_unit(name)
+        except JujuError:
+            try:
+                return await get_app(name)
+            except JujuError:
+                raise JujuError("Cannot find entity {}".format(name))
+    return _get_entity
+
+
+@pytest.fixture
+async def run_command(get_unit):  # pylint: disable=redefined-outer-name
+    '''
+    Runs a command on a unit.
+
+    :param cmd: Command to be run
+    :param target: Unit object or unit name string
+    '''
+    async def _run_command(cmd, target):
+        unit = (
+            target
+            if isinstance(target, juju.unit.Unit)
+            else await get_unit(target)
+        )
+        action = await unit.run(cmd)
+        return action.results
+    return _run_command
+
+
+@pytest.fixture
+async def file_stat(run_command):  # pylint: disable=redefined-outer-name
+    '''
+    Runs stat on a file
+
+    :param path: File path
+    :param target: Unit object or unit name string
+    '''
+    async def _file_stat(path, target):
+        cmd = STAT_CMD % path
+        results = await run_command(cmd, target)
+        if results['Code'] == '0':
+            return json.loads(results['Stdout'])
+        else:
+            # A common possible error is simply ENOENT, the file ain't there.
+            # A better solution would be to retrieve the exception that the
+            # remote python code raised, but that would probably require a real
+            # RPC setup
+            raise RuntimeError('Stat failed: {}'.format(results))
+
+    return _file_stat
+
+
+@pytest.fixture
+async def file_contents(run_command):  # pylint: disable=redefined-outer-name
+    '''
+    Returns the contents of a file
+
+    :param path: File path
+    :param target: Unit object or unit name string
+    '''
+    async def _file_contents(path, target):
+        cmd = 'cat {}'.format(path)
+        results = await run_command(cmd, target)
+        return results['Stdout']
+    return _file_contents
+
+
+@pytest.fixture
+async def reconfigure_app(get_app, model):  # pylint: disable=redefined-outer-name
+    '''Applies a different config to the requested app'''
+    async def _reconfigure_app(cfg, target):
+        application = (
+            target
+            if isinstance(target, juju.application.Application)
+            else await get_app(target)
+        )
+        await application.set_config(cfg)
+        await application.get_config()
+        await model.block_until(lambda: application.status == 'active')
+    return _reconfigure_app
diff --git a/src/tests/functional/requirements.txt b/src/tests/functional/requirements.txt
new file mode 100644
index 0000000..193a938
--- /dev/null
+++ b/src/tests/functional/requirements.txt
@@ -0,0 +1,7 @@
+flake8
+juju
+mock
+pytest
+pytest-asyncio
+requests
+charmhelpers
diff --git a/src/tests/functional/test_hwhealth.py b/src/tests/functional/test_hwhealth.py
new file mode 100644
index 0000000..dc31368
--- /dev/null
+++ b/src/tests/functional/test_hwhealth.py
@@ -0,0 +1,238 @@
+import os
+import pytest
+import sys
+import subprocess
+import asyncio
+sys.path.append('lib')
+from hwhealth import hwdiscovery  # noqa: E402
+from hwhealth import classes      # noqa: E402
+
+# Treat all tests as coroutines
+pytestmark = pytest.mark.asyncio
+SERIES = ['xenial', 'bionic']
+juju_repository = os.getenv('JUJU_REPOSITORY', '.').rstrip('/')
+nrpecfg_dir = '/etc/nagios/nrpe.d'
+
+
+###################
+# Custom fixtures #
+###################
+
+@pytest.fixture(scope='module',
+                params=SERIES)
+async def deploy_app(request, model):
+    '''Deploys the hw-health charm as a subordinate of ubuntu'''
+    # TODO: this might look nicer if we deployed a bundle instead. It could be
+    # a jinja template to handle the parametrization
+    release = request.param
+    channel = 'stable'
+    hw_health_app_name = 'hw-health-' + release
+
+    for principal_app in ['ubuntu', 'nagios']:
+        await model.deploy(
+            principal_app,
+            application_name='{}-{}'.format(principal_app, release),
+            series=release,
+            channel=channel,
+        )
+    nrpe_app = await model.deploy(
+        'nrpe',
+        application_name='nrpe-' + release,
+        series=release,
+        num_units=0,
+        channel=channel,
+    )
+    await nrpe_app.add_relation(
+        'general-info',
+        'ubuntu-{}:juju-info'.format(release)
+    )
+    await nrpe_app.add_relation(
+        'monitors',
+        'nagios-{}:monitors'.format(release)
+    )
+
+    # Attaching resources is not implemented yet in libjuju
+    # see https://github.com/juju/python-libjuju/issues/294
+    tools_res_path = os.path.join(os.getenv('JUJU_REPOSITORY'), 'tools.zip')
+    subprocess.check_call([
+        'juju',
+        'deploy',
+        '-m',
+        model.info.name,
+        os.path.join(os.getenv('JUJU_REPOSITORY'), 'builds', 'hw-health'),
+        hw_health_app_name,
+        '--resource',
+        'tools={}'.format(tools_res_path),
+    ])
+    # This is pretty horrible, but we can't deploy via libjuju
+    while True:
+        try:
+            hw_health_app = model.applications[hw_health_app_name]
+            break
+        except KeyError:
+            await asyncio.sleep(5)
+
+    await hw_health_app.add_relation(
+        'general-info',
+        'ubuntu-{}:juju-info'.format(release)
+    )
+    await hw_health_app.add_relation(
+        'nrpe-external-master',
+        nrpe_app.name + ':nrpe-external-master'
+    )
+    # The app will initially be in blocked state because it's running in a
+    # container
+    await model.block_until(lambda: hw_health_app.status == 'blocked')
+    yield hw_health_app
+
+
+@pytest.fixture(scope='module')
+async def deployed_unit(deploy_app):
+    '''Returns the hw-health unit we've deployed'''
+    return deploy_app.units.pop()
+
+
+@pytest.fixture(scope='function')
+async def tools(monkeypatch):
+    '''All tool classes know which files should be installed and how, so we can
+    use them to read the expected stat results. Monkeypatching is however
+    required as the classes code is not expected to be run outside of a
+    deployed charm'''
+    with monkeypatch.context() as m:
+        m.setattr('charmhelpers.core.hookenv.charm_dir',
+                  lambda: os.getenv('JUJU_REPOSITORY'))
+        m.setattr('charmhelpers.core.hookenv.config',
+                  lambda x=None: dict())
+        m.setattr('charmhelpers.contrib.charmsupport.nrpe.get_nagios_hostname',
+                  lambda: 'pytest')
+        return hwdiscovery.get_tools('test')
+
+#########
+# Tests #
+#########
+
+
+async def test_cannot_run_in_container(deploy_app):
+    assert deploy_app.status == 'blocked'
+
+
+async def test_forced_deploy(deploy_app, model):
+    await deploy_app.set_config({'manufacturer': 'test'})
+    await model.block_until(lambda: deploy_app.status == 'active')
+    assert deploy_app.status == 'active'
+
+
+async def test_stats(tools, deployed_unit, file_stat):
+    # This should really be a parametrized test, but fixtures cannot be used
+    # as parameter values as if they were iterators
+    # It should also check for other installed files and differentiate between
+    # tool types (e.g. IpmiTool does not use a vendor binary)
+    for tool in tools:
+        # Have we rendered the nrpe check cfg?
+        nrpecfg_path = os.path.join(nrpecfg_dir,
+                                    'check_{}.cfg'.format(tool._shortname))
+        print('Checking {}'.format(nrpecfg_path))
+        test_stat = await file_stat(nrpecfg_path, deployed_unit)
+        assert test_stat['size'] > 0
+
+        # Have we installed the nrpe check script?
+        nrpescript_path = os.path.join(tool.NRPE_PLUGINS_DIR,
+                                       tool._nrpe_script)
+        print('Checking {}'.format(nrpescript_path))
+        test_stat = await file_stat(nrpescript_path, deployed_unit)
+        assert test_stat['size'] > 0
+        assert test_stat['gid'] == tool.NRPE_PLUGINS_GID
+        assert test_stat['uid'] == tool.NRPE_PLUGINS_UID
+        assert test_stat['mode'] == oct(tool.NRPE_PLUGINS_MODE)
+        if isinstance(tool, classes.IpmiTool):
+            # Have we added sudo rights for running freeipmi commands?
+            sudoer_path = os.path.join(tool.SUDOERS_DIR, tool._sudoer_file)
+            print('Checking {}'.format(sudoer_path))
+            test_stat = await file_stat(sudoer_path, deployed_unit)
+            assert test_stat['size'] > 0
+            assert test_stat['gid'] == tool.SUDOERS_GID
+            assert test_stat['uid'] == tool.SUDOERS_UID
+            assert test_stat['mode'] == oct(tool.SUDOERS_MODE)
+        elif isinstance(tool, classes.VendorTool):
+            # Have we installed the cronjob script helper?
+            cronjob_script_path = os.path.join(tool.NRPE_PLUGINS_DIR,
+                                               tool._cronjob_script)
+            print('Checking {}'.format(cronjob_script_path))
+            test_stat = await file_stat(cronjob_script_path, deployed_unit)
+            assert test_stat['size'] > 0
+            assert test_stat['gid'] == tool.CRONJOB_SCRIPT_GID
+            assert test_stat['uid'] == tool.CRONJOB_SCRIPT_UID
+            assert test_stat['mode'] == oct(tool.CRONJOB_SCRIPT_MODE)
+
+            # Have we installed the cronjob itself?
+            cronjob_path = os.path.join(tool.CROND_DIR, tool._shortname)
+            print('Checking {}'.format(cronjob_path))
+            test_stat = await file_stat(cronjob_path, deployed_unit)
+            assert test_stat['size'] > 0
+
+            # Have we installed the vendor binary?
+            if isinstance(tool, classes.MdadmTool):
+                bin_path = os.path.join('/sbin', tool._shortname)
+            else:
+                bin_path = os.path.join(tool.TOOLS_DIR, tool._shortname)
+            print('Checking {}'.format(bin_path))
+            test_stat = await file_stat(bin_path, deployed_unit)
+            assert test_stat['size'] > 0
+            assert test_stat['gid'] == tool.TOOLS_GID
+            assert test_stat['uid'] == tool.TOOLS_UID
+            assert test_stat['mode'] == oct(tool.TOOLS_MODE)
+
+
+async def test_removal(tools, model, deploy_app, file_stat):
+    '''Remove the application and verify that all files have been cleaned up'''
+    hw_health_app_name = deploy_app.name
+    await deploy_app.remove()
+    await model.block_until(
+        lambda: hw_health_app_name not in model.applications
+    )
+    # Since we've removed the hw-health app, we can't target it anymore;
+    # we need to find the principal unit instead
+    for unit in model.units.values():
+        if unit.entity_id.startswith('ubuntu'):
+            ubuntu_unit = unit
+    for tool in tools:
+        # Have we removed the nrpe check cfg?
+        nrpecfg_path = os.path.join(nrpecfg_dir,
+                                    'check_{}.cfg'.format(tool._shortname))
+        print('Checking {}'.format(nrpecfg_path))
+        with pytest.raises(RuntimeError):
+            await file_stat(nrpecfg_path, ubuntu_unit)
+
+        # Have we removed the nrpe check script?
+        nrpescript_path = os.path.join(tool.NRPE_PLUGINS_DIR,
+                                       tool._nrpe_script)
+        print('Checking {}'.format(nrpescript_path))
+        with pytest.raises(RuntimeError):
+            await file_stat(nrpescript_path, ubuntu_unit)
+        if isinstance(tool, classes.IpmiTool):
+            # Have we removed sudo rights for running freeipmi commands?
+            sudoer_path = os.path.join(tool.SUDOERS_DIR, tool._sudoer_file)
+            print('Checking {}'.format(sudoer_path))
+            with pytest.raises(RuntimeError):
+                await file_stat(sudoer_path, ubuntu_unit)
+        elif isinstance(tool, classes.VendorTool):
+            # Have we removed the cronjob script helper?
+            cronjob_path = os.path.join(tool.NRPE_PLUGINS_DIR,
+                                        tool._cronjob_script)
+            print('Checking {}'.format(cronjob_path))
+            with pytest.raises(RuntimeError):
+                await file_stat(cronjob_path, ubuntu_unit)
+
+            # Have we removed the cronjob itself?
+            cronjob_path = os.path.join(tool.CROND_DIR, tool._shortname)
+            print('Checking {}'.format(cronjob_path))
+            with pytest.raises(RuntimeError):
+                await file_stat(cronjob_path, ubuntu_unit)
+
+            if not isinstance(tool, classes.MdadmTool):
+                # /sbin/mdadm will not be removed, but the vendor binaries
+                # should have been
+                bin_path = os.path.join(tool.TOOLS_DIR, tool._shortname)
+                print('Checking {}'.format(bin_path))
+                with pytest.raises(RuntimeError):
+                    await file_stat(bin_path, ubuntu_unit)
diff --git a/src/tests/unit/test_check_megacli.py b/src/tests/unit/test_check_megacli.py
index 3fc9b6b..d2e21c4 100644
--- a/src/tests/unit/test_check_megacli.py
+++ b/src/tests/unit/test_check_megacli.py
@@ -4,7 +4,7 @@ import sys
 import unittest
 import unittest.mock as mock
 
-sys.path.append('files/megaraid')
+sys.path.append('files/megacli')
 import check_megacli  # noqa: E402
 
 
diff --git a/src/tests/unit/test_hwdiscovery.py b/src/tests/unit/test_hwdiscovery.py
index 7cb516e..9c184b7 100644
--- a/src/tests/unit/test_hwdiscovery.py
+++ b/src/tests/unit/test_hwdiscovery.py
@@ -5,54 +5,83 @@ import sys
 import unittest
 import unittest.mock as mock
 
-sys.path.append('lib/utils')
+sys.path.append('lib/hwhealth')
 import hwdiscovery  # noqa: E402
 
 
-class TestHWDiscovery(unittest.TestCase):
-    @mock.patch('hwdiscovery._supports_mdadm')
-    @mock.patch('hwdiscovery.dmidecode')
-    @mock.patch('glob.glob')
-    @mock.patch('charmhelpers.core.hookenv.config')
-    def test_get_tools_megacli(self, mock_hookenv, driver, dmidecode, supports_mdadm):
+class TestGetTools(unittest.TestCase):
+    def setUp(self):
+        patchers = list()
+        # Mock all Tool classes
+        hwhealth_classes = [
+            'HPETool',
+            'HPTool',
+            'MdadmTool',
+            'IpmiTool',
+            'Sas2IrcuTool',
+            'Sas3IrcuTool',
+            'MegaCLITool'
+        ]
+        for hwhealth_class in hwhealth_classes:
+            patchers.append(mock.patch('hwdiscovery.classes.{}'
+                                       ''.format(hwhealth_class),
+                                       autospec=True))
+
+        # Provide test defaults that should make get_tools detect no tool
+        # support. Specific hardware will be mocked in the specific tests
+
+        # Mock the hookenv; disable IPMI support
+        def config_return_values(value):
+            if value == 'enable_ipmi':
+                return False
+            else:
+                return True
+
+        patchers.append(mock.patch('hwdiscovery.hookenv.config',
+                                   autospec=True,
+                                   side_effect=config_return_values))
+        # Disable mdadm support
+        patchers.append(mock.patch('hwdiscovery._supports_mdadm',
+                                   return_value=False))
+        # Assume a dummy 'unittest' hardware model
+        patchers.append(mock.patch('hwdiscovery.dmidecode',
+                                   return_value='unittest'))
+
+        # Start all mocks
+        for patcher in patchers:
+            patcher.start()
+
+        # Clean up all mocks at the end of the testrun
+        self.addCleanup(mock.patch.stopall)
+
+    @mock.patch('hwdiscovery.glob.glob')
+    def test_get_tools_megacli(self, driver):
         def side_effect_glob(value):
             if value == '/sys/bus/pci/drivers/megaraid_sas/00*':
                 return ['/...megaraid']
             else:
                 return []
 
-        supports_mdadm.return_value = False
-        dmidecode.return_value = 'unittest'
         driver.side_effect = side_effect_glob
-        mock_hookenv.return_value = True
         real = hwdiscovery.get_tools()
-        expected = set(['megacli', 'ipmi'])
+        expected = set([hwdiscovery.classes.MegaCLITool()])
         self.assertEqual(real, expected)
 
-    @mock.patch('hwdiscovery._supports_mdadm')
-    @mock.patch('hwdiscovery.dmidecode')
-    @mock.patch('glob.glob')
-    @mock.patch('charmhelpers.core.hookenv.config')
-    def test_get_tools_sas2ircu(self, mock_hookenv, driver, dmidecode, supports_mdadm):
+    @mock.patch('hwdiscovery.glob.glob')
+    def test_get_tools_sas2ircu(self, driver):
         def side_effect_glob(value):
             if value == '/sys/bus/pci/drivers/mpt2sas/00*':
                 return ['/...mp2sas']
             else:
                 return []
 
-        supports_mdadm.return_value = False
-        dmidecode.return_value = 'unittest'
-        mock_hookenv.return_value = True
         driver.side_effect = side_effect_glob
         real = hwdiscovery.get_tools()
-        expected = set(['sas2ircu', 'ipmi'])
+        expected = set([hwdiscovery.classes.Sas2IrcuTool()])
         self.assertEqual(real, expected)
 
-    @mock.patch('hwdiscovery._supports_mdadm')
-    @mock.patch('hwdiscovery.dmidecode')
-    @mock.patch('glob.glob')
-    @mock.patch('charmhelpers.core.hookenv.config')
-    def test_get_tools_sas3ircu(self, mock_hookenv, driver, dmidecode, supports_mdadm):
+    @mock.patch('hwdiscovery.glob.glob')
+    def test_get_tools_sas3ircu(self, driver):
         def side_effect_glob(value):
             if value not in ('/sys/bus/pci/drivers/megaraid_sas/00*',
                              '/sys/bus/pci/drivers/mpt2sas/00*'):
@@ -60,52 +89,34 @@ class TestHWDiscovery(unittest.TestCase):
             else:
                 return []
 
-        supports_mdadm.return_value = False
-        dmidecode.return_value = 'unittest'
         driver.side_effect = side_effect_glob
-        mock_hookenv.return_value = False
         real = hwdiscovery.get_tools()
-        expected = set(['sas3ircu'])
+        expected = set([hwdiscovery.classes.Sas3IrcuTool()])
         self.assertEqual(real, expected)
 
-    @mock.patch('hwdiscovery._supports_mdadm')
-    @mock.patch('hwdiscovery.dmidecode')
-    @mock.patch('glob.glob')
-    @mock.patch('charmhelpers.core.hookenv.config')
-    def test_get_tools_hpe(self, mock_hookenv, driver, dmidecode, supports_mdadm):
-        supports_mdadm.return_value = False
-        dmidecode.return_value = 'HPE'
-        driver.side_effect = lambda x: []
-        mock_hookenv.return_value = False
+    @mock.patch('hwdiscovery.dmidecode', return_value='HPE')
+    @mock.patch('hwdiscovery.glob.glob', return_value=[])
+    def test_get_tools_hpe(self, driver, dmidecode):
         real = hwdiscovery.get_tools()
-        expected = set(['hpe'])
+        expected = set([hwdiscovery.classes.HPETool()])
         self.assertEqual(real, expected)
 
-    @mock.patch('hwdiscovery._supports_mdadm')
-    @mock.patch('hwdiscovery.dmidecode')
-    @mock.patch('glob.glob')
-    @mock.patch('charmhelpers.core.hookenv.config')
-    def test_get_tools_hp(self, mock_hookenv, driver, dmidecode, supports_mdadm):
-        supports_mdadm.return_value = False
-        dmidecode.return_value = 'HP'
-        driver.side_effect = lambda x: []
-        mock_hookenv.return_value = False
+    @mock.patch('hwdiscovery.dmidecode', return_value='HP')
+    @mock.patch('hwdiscovery.glob.glob', return_value=[])
+    def test_get_tools_hp(self, driver, dmidecode):
         real = hwdiscovery.get_tools()
-        expected = set(['hp'])
+        expected = set([hwdiscovery.classes.HPTool()])
         self.assertEqual(real, expected)
 
-    @mock.patch('hwdiscovery._supports_mdadm')
-    @mock.patch('hwdiscovery.dmidecode')
+    @mock.patch('hwdiscovery._supports_mdadm', return_value=True)
     @mock.patch('glob.glob')
-    @mock.patch('charmhelpers.core.hookenv.config')
-    def test_get_tools_mdadm(self, mock_hookenv, driver, dmidecode, supports_mdadm):
-        supports_mdadm.return_value = True
-        dmidecode.return_value = 'unittest'
-        mock_hookenv.return_value = False
+    def test_get_tools_mdadm(self, driver, supports_mdadm):
         real = hwdiscovery.get_tools()
-        expected = set(['mdadm'])
+        expected = set([hwdiscovery.classes.MdadmTool()])
         self.assertEqual(real, expected)
 
+
+class TestDmiDecode(unittest.TestCase):
     @mock.patch('subprocess.check_output')
     @mock.patch('charmhelpers.core.host.is_container')
     def test_dmidecode(self, is_container, dmidecode_call):
@@ -116,6 +127,8 @@ class TestHWDiscovery(unittest.TestCase):
         expected = 'manufacturer'
         self.assertEqual(real, expected)
 
+
+class TestMdadmSupport(unittest.TestCase):
     @mock.patch('os.path.exists')
     @mock.patch('subprocess.Popen')
     @mock.patch('charmhelpers.core.hookenv.log')
diff --git a/src/tests/unit/test_tooling.py b/src/tests/unit/test_tooling.py
deleted file mode 100644
index 5236f6e..0000000
--- a/src/tests/unit/test_tooling.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import os  # noqa: F401
-import sys
-import unittest
-import unittest.mock as mock
-import zipfile
-
-
-sys.path.append('lib/utils')
-import tooling  # noqa: E402
-
-
-class TestTooling(unittest.TestCase):
-    def test_install_resource_notools(self):
-        self.assertEqual(tooling.install_resource([], False), False)
-        self.assertEqual(
-            tooling.install_resource([], '/path/to/resource'), False)
-
-    def test_install_resource_noresource_nomdadm(self):
-        self.assertEqual(tooling.install_resource(['unittest'], False), False)
-
-    def test_install_resource_noresource_yesmdadm(self):
-        self.assertEqual(tooling.install_resource(['mdadm'], False), True)
-
-    @mock.patch('glob.glob')
-    @mock.patch('os.chmod')
-    @mock.patch('os.path.exists')
-    @mock.patch('os.path.isfile')
-    @mock.patch('shutil.chown')
-    @mock.patch('shutil.copy2')
-    @mock.patch('tempfile.TemporaryDirectory')
-    @mock.patch('zipfile.ZipFile')
-    def test_install_resource_ok(self, zfile, tmpdir, shutil_copy2,
-                                 shutil_chown, path_isfile, path_exists,
-                                 os_chmod, glob_glob):
-        class _WrapCls(object):
-            def __init__(cls):
-                cls.extractall = lambda x: None
-                cls.close = lambda: None
-
-        class Test_resource_zipfile(object):
-            def __enter__(cls):
-                return _WrapCls()
-
-            def __exit__(cls, type, value, traceback):
-                pass
-
-        class Test_resource_tmpdir(Test_resource_zipfile):
-            def __enter__(cls):
-                return ''
-
-        path_exists.return_value = True
-        zfile.return_value = Test_resource_zipfile()
-        tmpdir.return_value = Test_resource_tmpdir()
-        glob_glob.return_value = ['/tmp/test/megacli']
-        os_chmod.return_value = None
-        path_isfile.return_value = True
-        shutil_chown.return_value = None
-        shutil_copy2.return_value = None
-        actual = tooling.install_resource(['megacli'], '/tmp/test.zip')
-        expected = True
-        self.assertEqual(actual, expected)
-
-    @mock.patch('os.path.exists')
-    @mock.patch('zipfile.ZipFile')
-    def test_install_resource_zipfile_error(self, zfile, path_exists):
-        path_exists.return_value = True
-        zfile.side_effect = zipfile.BadZipFile('Intended error for unit test')
-        actual = tooling.install_resource(['megacli'], '/tmp/test.zip')
-        expected = False
-        self.assertEqual(actual, expected)
-
-    @mock.patch('glob.glob')
-    @mock.patch('os.path.exists')
-    @mock.patch('os.path.isfile')
-    @mock.patch('tempfile.TemporaryDirectory')
-    @mock.patch('zipfile.ZipFile')
-    def test_install_resource_nomatchedresource(self, zfile, tmpdir,
-                                                path_isfile, path_exists,
-                                                glob_glob):
-        class _WrapCls(object):
-            def __init__(cls):
-                cls.extractall = lambda x: None
-                cls.close = lambda: None
-
-        class Test_resource_zipfile(object):
-            def __enter__(cls):
-                return _WrapCls()
-
-            def __exit__(cls, type, value, traceback):
-                pass
-
-        class Test_resource_tmpdir(Test_resource_zipfile):
-            def __enter__(cls):
-                return ''
-
-        path_exists.return_value = True
-        zfile.return_value = Test_resource_zipfile()
-        tmpdir.return_value = Test_resource_tmpdir()
-        glob_glob.return_value = ['/tmp/test/megacli']
-        path_isfile.return_value = True
-        self.assertEqual(
-            tooling.install_resource(['unittest'], '/tmp/test.zip'), False)
-
-    def test_install_tools_notools(self):
-        self.assertEqual(tooling.install_tools([]), False)
-
-    def test_install_tools_unsupported_tools(self):
-        actual = tooling.install_tools(['unittest1', 'unittest2'])
-        expected = False
-        self.assertEqual(actual, expected)
-
-    def test_install_tools_supported_tools(self):
-        self.assertEqual(tooling.install_tools(['megacli', 'sas2ircu']), True)
-
-    def test_install_tools_mdadm(self):
-        self.assertEqual(tooling.install_tools(['mdadm']), True)
-
-    @mock.patch('os.environ')
-    @mock.patch('os.path.exists')
-    @mock.patch('os.path.join')
-    def test_get_filepath(self, path_join, path_exists, environ):
-        expected = 'unittest1'
-        environ.return_value = {'CHARM_DIR': 'xxx'}
-        path_join.return_value = expected
-        path_exists.return_value = True
-        self.assertEqual(tooling._get_filepath('asdfasdf'), expected)
-
-    def test_configure_tools_notools(self):
-        self.assertEqual(tooling.configure_tools([]), False)
-
-    def test_configure_tools_unsupported_tools(self):
-        self.assertEqual(
-            tooling.configure_tools(['unittest1', 'unittest2']), False)
diff --git a/src/tox.ini b/src/tox.ini
index 522cffd..77597a0 100644
--- a/src/tox.ini
+++ b/src/tox.ini
@@ -1,10 +1,9 @@
 [tox]
-envlist = unit, pep8
+envlist = unit, functional, pep8
 skipsdist = true
 
 [testenv:unit]
 basepython=python3
-envdir={toxworkdir}/py3
 deps=
   charms.reactive
   nose
@@ -13,9 +12,17 @@ commands =
   {toxworkdir}/../tests/download_nagios_plugin3.py
   nosetests tests/unit
 
+[testenv:functional]
+passenv =
+  HOME
+  JUJU_REPOSITORY
+  PATH
+commands = pytest -v --ignore {toxinidir}/tests/unit
+deps = -r{toxinidir}/tests/functional/requirements.txt
+       -r{toxinidir}/requirements.txt
+
 [testenv:pep8]
 basepython = python3
 deps =
   flake8
 commands = flake8 {posargs} --max-complexity=20 --max-line-length=120 .
-
diff --git a/tools.zip b/tools.zip
new file mode 100644
index 0000000..1932bfb
Binary files /dev/null and b/tools.zip differ

Follow ups