
nagios-charmers team mailing list archive

[Merge] ~aluria/hw-health-charm/+git/hw-health-charm:oo-rewrite-hwdiscovery into hw-health-charm:oo-rewrite-integration

 

Alvaro Uría has proposed merging ~aluria/hw-health-charm/+git/hw-health-charm:oo-rewrite-hwdiscovery into hw-health-charm:oo-rewrite-integration.

Requested reviews:
  Nagios Charm developers (nagios-charmers)

For more details, see:
https://code.launchpad.net/~aluria/hw-health-charm/+git/hw-health-charm/+merge/364844
-- 
Your team Nagios Charm developers is requested to review the proposed merge of ~aluria/hw-health-charm/+git/hw-health-charm:oo-rewrite-hwdiscovery into hw-health-charm:oo-rewrite-integration.
diff --git a/src/lib/hwhealth/discovery/lshw.py b/src/lib/hwhealth/discovery/lshw.py
new file mode 100644
index 0000000..9258b95
--- /dev/null
+++ b/src/lib/hwhealth/discovery/lshw.py
@@ -0,0 +1,212 @@
+# -*- coding: us-ascii -*-
+import json
+import os
+import subprocess
+
+
+class Hardware(object):
+    def __init__(self, filename='/var/run/hw_health_lshw.json'):
+        self.__filename = filename
+        self._lshw = self.__load_hwinfo()
+
+    def __load_hwinfo(self):
+        try:
+            if os.path.exists(self.__filename):
+                with open(self.__filename, 'r') as fd:
+                    hwinfo = json.load(fd)
+            else:
+                output = subprocess.check_output(['lshw', '-json'])
+                hwinfo = json.loads(output.decode())
+                with open(self.__filename, 'w') as fd:
+                    fd.write(output.decode())
+
+            return hwinfo
+        except PermissionError:
+            return {}
+        except subprocess.CalledProcessError:
+            return {}
+
+    @property
+    def get_system(self):
+        """Helper to get vendor info retrieved via actions
+        """
+        keys = 'id description vendor product version serial'.split()
+        sysinfo = {}
+        for k in keys:
+            v = self._lshw.get(k)
+            if k == 'id':
+                k = 'hostname'
+            sysinfo.update({k: v})
+        return sysinfo
+
+    @property
+    def get_motherboard(self):
+        """Helper to get vendor info retrieved via actions
+        """
+        keys = 'description vendor product version serial'.split()
+        buses = []
+        for child in self._lshw.get('children', [{}]):
+            if child.get('class') != 'bus':
+                continue
+            buses.append(dict([(k, child.get(k)) for k in keys]))
+        return buses
+
+    def _get_inspect_bridges(self, bridge_item, bridge_class='storage'):
+        bridge_class_items = []
+        for item in bridge_item.get('children', [{}]):
+            if item.get('class', '') == 'bridge':
+                bridge_class_items.extend(self._get_inspect_bridges(item, bridge_class))
+            elif item.get('class', '') == bridge_class:
+                bridge_class_items.append(item)
+        return bridge_class_items
+
+    @property
+    def _get_storage_class(self):
+        """returns all the storage classes. Used by get_storage_class_info
+        and get_disk_class_info.
+
+        The aim of the function is to easily parse products to detect which tool(s)
+        need to be used.
+        """
+        storage = []
+        # system -> bus -> bridge -> storage
+        for bus in self._lshw.get('children', [{}]):
+            if bus.get('class', '') != 'bus':
+                continue
+            for bridge in bus.get('children', [{}]):
+                if bridge.get('class', '') != 'bridge':
+                    continue
+                for item in bridge.get('children', [{}]):
+                    if item.get('class', '') == 'bridge':
+                        storage.extend(self._get_inspect_bridges(item, 'storage'))
+                    elif item.get('class', '') == 'storage':
+                        storage.append(item)
+        return storage
+
+    @property
+    def get_storage_class_info(self):
+        """returns a list of storage controllers with the following keys:
+        vendor, product name, businfo and linux driver
+
+        The aim of the function is to easily parse products to detect which tool(s)
+        need to be used.
+        """
+        keys = 'vendor product businfo'.split()
+        config_keys = ['driver']
+        storage = []
+        for item in self._get_storage_class:
+            storage_item = dict([(k, item.get(k)) for k in keys])
+            storage_item.update(dict([(k, item.get('configuration', {}).get(k)) for k in config_keys]))
+            storage_item.update({'has_children': 'children' in item})
+            storage.append(storage_item)
+
+        return storage
+
+    @property
+    def get_disk_class_info(self):
+        """returns a list of storage devices with the following keys:
+        product name, serial number, PCI bus info, physical device and ID,
+        size (capacity) and logicalname (/dev/sdX), storage parent (ie.
+        RAID controller)
+
+        The aim of the function is to easily parse products to detect which tool(s)
+        need to be used.
+        """
+        keys = 'product serial businfo physid dev size logicalname'.split()
+        disks = []
+        for item in self._get_storage_class:
+            for child in item.get('children', [{}]):
+                if child.get('class', '') != 'disk':
+                    continue
+                disk = dict([(k, child.get(k)) for k in keys])
+                disk.update({'storage_parent': item.get('product')})
+                disks.append(disk)
+        return disks
+
+    @property
+    def get_network_class_info(self):
+        """returns the list of network cards with the following keys:
+        vendor, product name, businfo, logicalname (eno1...), serial number,
+        linux driver and driver version, NIC firmware version and speed
+
+        The aim of the function is to easily parse products to detect which tool(s)
+        need to be used.
+        """
+        keys = 'vendor product businfo logicalname serial'.split()
+        config_keys = 'driver driverversion firmware speed'.split()
+        nics = []
+        # system -> bus -> bridge -> network
+        for bus in self._lshw.get('children', [{}]):
+            if bus.get('class', '') != 'bus':
+                continue
+            for bridge in bus.get('children', [{}]):
+                if bridge.get('class', '') != 'bridge':
+                    continue
+                for item in bridge.get('children', [{}]):
+                    if item.get('class', '') == 'bridge':
+                        nics.extend(self._get_inspect_bridges(item, 'network'))
+                    elif item.get('class', '') == 'network':
+                        nics.append(item)
+
+        nics_filtered = []
+        for nic in nics:
+            nic_item = dict([(k, nic.get(k)) for k in keys])
+            nic_item.update(dict([(k, nic.get('configuration', {}).get(k)) for k in config_keys]))
+            nics_filtered.append(nic_item)
+        return nics_filtered
+
+    @property
+    def formatted_system_info(self):
+        ctxt = self.get_system
+        return (
+            '{description}: vendor[{vendor}], product_name[{product}], version[{version}]'
+            ', serial[{serial}], hostname[{hostname}]').format(**ctxt)
+
+    @property
+    def formatted_motherboard_info(self):
+        return '\n'.join([
+            '{description}: vendor[{vendor}], product_name[{product}], version[{version}]'
+            ', serial[{serial}]'.format(**ctxt) for ctxt in self.get_motherboard])
+
+    @property
+    def formatted_storage_class_info(self):
+        LINE = 'driver[{driver}], businfo[{businfo}], has_children[{has_children}]'
+        ctxts = []
+        for ctxt in self.get_storage_class_info:
+            if ctxt.get('vendor') and ctxt.get('product'):
+                tmpl = 'Storage class: vendor[{vendor}], product_name[{product}], ' + LINE
+            else:
+                tmpl = 'Storage class: {}'.format(LINE)
+            ctxts.append(tmpl.format(**ctxt))
+
+        if ctxts:
+            return '\n'.join(ctxts)
+
+    @property
+    def formatted_disk_class_info(self):
+        return '\n'.join([
+            'Disk class: ld[{logicalname}], dev[{dev}], physid[{physid}], businfo[{businfo}]'
+            ', product_name[{product}], serial[{serial}], size[{size}]'
+            ', storage_parent[{storage_parent}]'.format(**ctxt) for ctxt in self.get_disk_class_info
+        ])
+
+    @property
+    def formatted_network_class_info(self):
+        return '\n'.join([
+            'NIC: iface[{logicalname}], businfo[{businfo}], vendor[{vendor}]'
+            ', product_name[{product}], firmware[{firmware}], driver[{driver}'
+            ', {driverversion}], serial[{serial}]'
+            ', speed[{speed}]'.format(**ctxt) for ctxt in self.get_network_class_info
+        ])
+
+
+if __name__ == '__main__':
+    hw = Hardware()
+    print(hw.formatted_system_info)
+    print(hw.formatted_motherboard_info)
+    print('\n== get_storage_classes')
+    print(hw.formatted_storage_class_info)
+    print('== get_disk_classes')
+    print(hw.formatted_disk_class_info)
+    print('\n== get_network_class_info')
+    print(hw.formatted_network_class_info)
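
For reference, a minimal sketch of the lshw -json nesting that Hardware walks (system -> bus -> bridge -> storage/network -> disk). The values below are hypothetical and only illustrate the keys the parser reads:

    import json
    import tempfile

    from hwhealth.discovery.lshw import Hardware

    # Hypothetical, trimmed lshw -json output; only the keys read by Hardware
    # are shown, and all values are made up for illustration.
    sample = {
        'id': 'node01', 'class': 'system', 'description': 'Rack Mount Chassis',
        'vendor': 'ExampleVendor', 'product': 'ExampleServer',
        'version': '1.0', 'serial': 'ABC123',
        'children': [{
            'class': 'bus', 'description': 'Motherboard',
            'vendor': 'ExampleVendor', 'product': 'ExampleBoard',
            'version': '1.0', 'serial': 'DEF456',
            'children': [{
                'class': 'bridge',
                'children': [{
                    'class': 'storage',
                    'vendor': 'LSI Logic / Symbios Logic',
                    'product': 'SAS3008 PCI-Express Fusion-MPT SAS-3',
                    'businfo': 'pci@0000:02:00.0',
                    'configuration': {'driver': 'mpt3sas'},
                    'children': [{
                        'class': 'disk', 'product': 'ExampleDisk',
                        'logicalname': '/dev/sda', 'size': 1000204886016,
                    }],
                }],
            }],
        }],
    }

    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fd:
        json.dump(sample, fd)

    hw = Hardware(filename=fd.name)
    print(hw.get_storage_class_info)  # one controller, driver 'mpt3sas', has_children True
    print(hw.get_disk_class_info)     # one disk, storage_parent set to the SAS controller

Pointing the constructor at an existing file skips both the lshw call and the /var/run cache write.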
diff --git a/src/lib/hwhealth/discovery/supported_vendors.py b/src/lib/hwhealth/discovery/supported_vendors.py
new file mode 100644
index 0000000..1480754
--- /dev/null
+++ b/src/lib/hwhealth/discovery/supported_vendors.py
@@ -0,0 +1,37 @@
+# -*- coding: us-ascii -*-
+
+SUPPORTED_VENDORS = {
+    'LSI Logic / Symbios Logic': {
+        'SAS2308 PCI-Express Fusion-MPT SAS-2': 'sas2ircu',
+        'SAS3008 PCI-Express Fusion-MPT SAS-3': 'sas3ircu',
+        'MegaRAID SAS-3 3108 [Invader]': 'megacli',
+        },
+    'Mellanox Technologies': {
+        'product': [
+            'MT27710 Family [ConnectX-4 Lx]',
+            'MT27700 Family [ConnectX-4]',
+        ],
+        'tool': 'mlxconfig'
+    },
+    'Intel Corporation': {
+        'PCIe Data Center SSD': 'nvme',
+    },
+    'Samsung Electronics Co Ltd': {
+        'NVMe SSD Controller SM961/PM961': 'nvme',
+    },
+    'Hewlett-Packard Company': {
+        'Smart Array Gen9 Controllers': 'ssacli'
+    }
+}
+
+SUPPORTED_DRIVERS = {
+    # ES3000 SSD /dev/hioa
+    'hio': {
+        'apt': 'hioa',
+        'tool': 'hio_info'
+        },
+    'nvme': {
+        'apt': 'nvme-cli',
+        'tool': 'nvme',
+    }
+}
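
As a quick illustration of how the two tables are consulted (the entries below are hypothetical, shaped like Hardware.get_storage_class_info output):

    from hwhealth.discovery.supported_vendors import (
        SUPPORTED_DRIVERS,
        SUPPORTED_VENDORS,
    )

    # Hypothetical entries shaped like Hardware.get_storage_class_info output.
    sas_ctrl = {'vendor': 'LSI Logic / Symbios Logic',
                'product': 'SAS3008 PCI-Express Fusion-MPT SAS-3',
                'driver': 'mpt3sas'}
    nvme_ssd = {'vendor': 'Samsung Electronics Co Ltd',
                'product': 'NVMe SSD Controller SM961/PM961',
                'driver': 'nvme'}

    # Vendor/product lookup maps straight to a vendor tool name.
    print(SUPPORTED_VENDORS[sas_ctrl['vendor']][sas_ctrl['product']])  # 'sas3ircu'

    # Driver lookup maps to the apt package plus the check tool.
    print(SUPPORTED_DRIVERS[nvme_ssd['driver']]['tool'])  # 'nvme'
    print(SUPPORTED_DRIVERS[nvme_ssd['driver']]['apt'])   # 'nvme-cli'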
diff --git a/src/lib/hwhealth/hwdiscovery.py b/src/lib/hwhealth/hwdiscovery.py
index b57c99e..f40fe70 100644
--- a/src/lib/hwhealth/hwdiscovery.py
+++ b/src/lib/hwhealth/hwdiscovery.py
@@ -1,9 +1,9 @@
 # -*- coding: us-ascii -*-
-import glob
 import os
 import re
 import subprocess
 
+<<<<<<< src/lib/hwhealth/hwdiscovery.py
 from charmhelpers.core import host, hookenv
 from hwhealth import tools
 
@@ -57,6 +57,29 @@ def _get_tools():
         toolset.add(tools.HPE())
     elif hw_vendor == 'HP':
         toolset.add(tools.HP())
+=======
+from hwhealth.discovery.lshw import Hardware
+from hwhealth.discovery.supported_vendors import SUPPORTED_VENDORS, SUPPORTED_DRIVERS
+
+from charmhelpers.core import hookenv
+
+
+def get_tools():
+    hwinfo = Hardware()
+    tools = set()
+    for storage in hwinfo.get_storage_class_info:
+        if storage.get('vendor'):
+            if storage.get('product') in SUPPORTED_VENDORS.get(storage['vendor'], []):
+                tools.add(SUPPORTED_VENDORS[storage['vendor']][storage['product']])
+            else:
+                hookenv.log('Product not supported: [{}][{}]'.format(storage.get('vendor'), storage.get('product')),
+                            hookenv.DEBUG)
+
+        if storage.get('driver') in SUPPORTED_DRIVERS:
+            tools.add(SUPPORTED_DRIVERS[storage['driver']]['tool'])
+        elif storage.get('driver'):
+            hookenv.log('Driver not supported: {}'.format(storage.get('driver')), hookenv.DEBUG)
+>>>>>>> src/lib/hwhealth/hwdiscovery.py
 
     # SW RAID?
     if _supports_mdadm():
@@ -68,42 +91,22 @@ def _get_tools():
     return toolset
 
 
-def dmidecode(key):
-    # dmidecode fails to access /dev/mem in containers.
-    assert not host.is_container()
-
-    try:
-        output = subprocess.check_output(['dmidecode', '-s', key])
-    except subprocess.CalledProcessError as e:  # noqa
-        # print('Unable to get "{}" value from DMI table: {}'.format(key, e))
-        return
-    # Ignore comments
-    new = []
-    for line in output.decode('ascii', 'ignore').splitlines():
-        if line.startswith('#'):
-            continue
-        new.append(line)
-    return '\n'.join(new)
-
-
 def _supports_mdadm():
-    found_raid_devices = False
+    """scans for mdadm devices and returns True when the first one is found (otherwise, it returns False)
+    """
     if os.path.exists('/sbin/mdadm'):
         try:
-            devices_raw = subprocess.Popen(
-                ['/sbin/mdadm', '--detail', '--scan'],
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE)
+            devices_raw = subprocess.check_output(['/sbin/mdadm', '--detail', '--scan'])
             devices_re = re.compile(r'^ARRAY\s+(\S+) ')
-            for line in devices_raw.stdout.readlines():
+            for line in devices_raw.splitlines():
                 line = line.decode().strip()
                 raid_dev = devices_re.search(line)
                 if raid_dev:
                     hookenv.log("Found md raid array {}".format(raid_dev.group(1)))
-                    found_raid_devices = True
+                    return True
         except Exception as e:
             hookenv.log("mdadm scan failed with {}".format(e))
-    return found_raid_devices
+    return False
 
 
 if __name__ == '__main__':
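
The rewritten _supports_mdadm() switches from Popen to check_output and returns as soon as the first array is seen. For reference, a minimal sketch of the ARRAY line format it matches (device name and UUID mirror the unit-test fixture further down in this diff):

    import re

    # Example `mdadm --detail --scan` output line; device name and UUID mirror
    # the fixture used in test_hwdiscovery.py below.
    line = 'ARRAY /dev/md0 metadata=1.2 name=node00:0 UUID=dfaf7413:7551b000:56dd7442:5b020adb'

    devices_re = re.compile(r'^ARRAY\s+(\S+) ')
    match = devices_re.search(line)
    if match:
        print(match.group(1))  # '/dev/md0'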
diff --git a/src/lib/hwhealth/tooling.py b/src/lib/hwhealth/tooling.py
new file mode 100644
index 0000000..aa45fca
--- /dev/null
+++ b/src/lib/hwhealth/tooling.py
@@ -0,0 +1,236 @@
+<<<<<<< src/lib/hwhealth/tooling.py
+=======
+# -*- coding: us-ascii -*-
+import glob
+import os
+import shutil
+import subprocess
+import tempfile
+import zipfile
+
+from charmhelpers.contrib.charmsupport import nrpe
+from charmhelpers.core import hookenv
+
+TOOLING = {
+    'megacli': {
+        'snap': 'megacli',
+        'filename': 'megaraid/check_megacli.py',
+        'cronjob': 'megaraid/check_megacli.sh'
+    },
+    'sas2ircu': {
+        'snap': 'sas2ircu',
+        'filename': 'sas2ircu/check_sas2ircu.py',
+        'cronjob': 'sas2ircu/check_sas2ircu.sh'
+    },
+    'sas3ircu': {
+        'snap': 'sas3ircu',
+        'filename': 'sas3ircu/check_sas3ircu.py',
+        'cronjob': 'sas3ircu/check_sas3ircu.sh'
+    },
+    'mdadm': {
+        'filename': 'mdadm/check_mdadm.py',
+        'cronjob': 'mdadm/cron_mdadm.py'
+    },
+    'ipmi': {
+        'filename': 'ipmi/check_ipmi_sensor',
+        'sudoers': 'ipmi/check_ipmi_sensor_sudoer'
+    },
+}
+
+SKIP_RESOURCES = set([
+    'hio_info',
+    'ipmi',
+    'mdadm',
+    'nvme',
+    'ssacli',
+])
+
+PLUGINS_DIR = '/usr/local/lib/nagios/plugins/'
+TOOLS_DIR = '/usr/local/bin/'
+CRONJOB_PATH = '/etc/cron.d/hw_health_{}'
+SUDOERS_DIR = '/etc/sudoers.d'
+
+
+def install_resource(tools, resource):
+    """Install hardware diagnostic tools from a charm resource
+
+    :param tools: set of tools to include
+    :param resource: resource object
+    :return: boolean, status of installation
+    """
+    if not isinstance(tools, set):
+        try:
+            tools = set(tools)
+        except TypeError as e:
+            hookenv.log("install_resources called with invalid 'tools', {}".format(e))
+            return False
+    if not tools:
+        return False
+    elif tools & SKIP_RESOURCES:
+        tools = tools - SKIP_RESOURCES
+        if not tools:
+            return True
+
+    if not isinstance(resource, str) \
+            or not resource.endswith('.zip') \
+            or not os.path.exists(resource):
+        hookenv.log("The resource 'tools' does not end with .zip or does not exist")
+        return False
+
+    with tempfile.TemporaryDirectory() as tmpdir:
+        try:
+            with zipfile.ZipFile(resource, 'r') as zf:
+                zf.extractall(tmpdir)
+
+            count = 0
+            for filepath in glob.glob(os.path.join(tmpdir, '*')):
+                filebasename = os.path.basename(filepath)
+                if filebasename in tools \
+                        and filebasename in TOOLING \
+                        and os.path.isfile(filepath):
+                    os.chmod(filepath, 0o755)
+                    shutil.chown(filepath, user=0, group=0)
+                    shutil.copy2(filepath, TOOLS_DIR)
+                    count += 1
+
+            return len(tools) == count and count > 0
+
+        except zipfile.BadZipFile as error:
+            hookenv.log('BadZipFile: {}'.format(error), hookenv.ERROR)
+
+        except PermissionError as error:
+            hookenv.log(
+                'Unable to unzip the resource: {}'.format(error), hookenv.ERROR)
+
+    return False
+
+
+def install_tools(tools, method='snap'):
+    """Install hardware management tools from a snap
+
+    :param tools: list of tools to install
+    :param method: defaults to 'snap' if unspecified; other methods have yet to be added
+    :return:
+    """
+    count = 0
+    for tool in tools:
+        if tool in TOOLING and method in TOOLING[tool]:
+            if method == 'snap':
+                try:
+                    # snap.install(TOOLING[tool][method])
+                    count += 1
+                except Exception as error:
+                    hookenv.log(
+                        'Snap install error: {}'.format(error), hookenv.ERROR)
+        elif tool == 'mdadm':
+            # Note(aluria): mdadm is assumed to be installed by default
+            count += 1
+
+    return len(tools) >= count > 0
+
+
+def _get_filepath(filename):
+    filepath = os.path.join(hookenv.charm_dir(), 'files', filename)
+    if os.path.exists(filepath):
+        return filepath
+
+
+def configure_tools(tools):
+    count = 0
+    for tool in tools:
+        if tool in TOOLING:
+            if 'cronjob' in TOOLING[tool]:
+                filepath = _get_filepath(TOOLING[tool]['cronjob'])
+                cronjob_scriptname = os.path.basename(TOOLING[tool]['cronjob'])
+                if filepath:
+                    subprocess.call(filepath)
+                    shutil.copy2(filepath, PLUGINS_DIR)
+                    hookenv.log(
+                        'Cronjob script [{}] copied to {}'.format(
+                            filepath, PLUGINS_DIR), hookenv.DEBUG)
+                else:
+                    continue
+                cronjob_file = CRONJOB_PATH.format(tool)
+                cronjob_line = '*/5 * * * * root {}\n'
+                cronjob_script = os.path.join(PLUGINS_DIR, cronjob_scriptname)
+                with open(cronjob_file, 'w') as fd:
+                    fd.write(cronjob_line.format(cronjob_script))
+                    hookenv.log(
+                        'Cronjob configured at {}'.format(cronjob_file),
+                        hookenv.DEBUG)
+                count += 1
+                hookenv.log('Cronjob for tool [{}] configured'.format(tool))
+
+            if 'filename' in TOOLING[tool]:
+                filepath = _get_filepath(TOOLING[tool]['filename'])
+                if filepath:
+                    shutil.copy2(filepath, PLUGINS_DIR)
+                    count += 1
+                    hookenv.log(
+                        'NRPE script for tool [{}] configured'.format(tool))
+            if 'sudoers' in TOOLING[tool]:
+                sudoers_path = _get_filepath(TOOLING[tool]['sudoers'])
+                if sudoers_path:
+                    shutil.copy2(sudoers_path, SUDOERS_DIR)
+                    count += 1
+                    hookenv.log('sudoers entry for tool [{}] added'.format(tool))
+    return count > 0
+
+
+def configure_cronjobs(tools):
+    pass
+
+
+def configure_nrpe_checks(tools):
+    if not configure_tools(tools):
+        hookenv.log(
+            "No tools configured. NRPE checks can't be configured",
+            hookenv.DEBUG)
+        return False
+
+    count = 0
+    hostname = nrpe.get_nagios_hostname()
+    nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False)
+    for tool in tools:
+        if tool in TOOLING and 'filename' in TOOLING[tool]:
+            scriptname = os.path.basename(TOOLING[tool]['filename'])
+            cmd = os.path.join(PLUGINS_DIR, scriptname)  # TODO args to the command?
+            if os.path.exists(cmd):
+                nrpe_setup.add_check(tool, '{} Hardware Health'.format(tool), cmd)
+                nrpe_setup.write()
+                count += 1
+        else:
+            cronjob_file = os.path.join('/etc/cron.d', tool)
+            if os.path.exists(cronjob_file):
+                os.unlink(cronjob_file)
+                hookenv.log(
+                    'Cronjob file [{}] removed'.format(cronjob_file),
+                    hookenv.DEBUG)
+
+    return count > 0
+
+
+def remove_nrpe_checks(tools):
+    if len(tools) == 0:
+        return False
+
+    hostname = nrpe.get_nagios_hostname()
+    nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False)
+    count = 0
+    for tool in tools:
+        if tool not in TOOLING:
+            continue
+        nrpe_setup.remove_check(
+            shortname=tool, check_cmd='check_{}'.format(tool))
+
+        if os.path.exists(CRONJOB_PATH.format(tool)):
+            os.remove(CRONJOB_PATH.format(tool))
+
+        hookenv.log('Check removed from nrpe: {}'.format(tool))
+        count += 1
+
+    if count > 0:
+        nrpe_setup.write()
+        return True
+    return False
+>>>>>>> src/lib/hwhealth/tooling.py
diff --git a/src/reactive/hw_health.py b/src/reactive/hw_health.py
index f5d84d0..720ce7e 100644
--- a/src/reactive/hw_health.py
+++ b/src/reactive/hw_health.py
@@ -13,7 +13,16 @@ from charms.reactive import (
     when_not,
 )
 from hwhealth.hwdiscovery import get_tools
+<<<<<<< src/reactive/hw_health.py
 from hwhealth import tools
+=======
+from hwhealth.tooling import (
+    configure_nrpe_checks,
+    install_resource,
+    remove_nrpe_checks,
+    SKIP_RESOURCES,
+)
+>>>>>>> src/reactive/hw_health.py
 
 
 @when_none('hw-health.installed', 'hw-health.unsupported')
@@ -36,6 +45,7 @@ def install():
         status.blocked('Hardware not supported')
         set_flag('hw-health.unsupported')
     else:
+<<<<<<< src/reactive/hw_health.py
         tool_list = list()
         for tool in toolset:
             tool.install()
@@ -45,6 +55,37 @@ def install():
         unitdb = unitdata.kv()
         unitdb.set('toolset', tool_list)
         set_flag('hw-health.installed')
+=======
+        resource = hookenv.resource_get('tools')
+        if resource:
+            hookenv.log('Installing from resource')
+            status.waiting('Installing from attached resource')
+            installed = install_resource(tools, resource)
+        else:
+            hookenv.log(
+                'Missing Juju resource: tools - alternative method is not'
+                ' available yet', hookenv.ERROR)
+            status.blocked('Missing Juju resource: tools')
+            return
+            # TODO(aluria): install vendor utilities via snaps
+            # https://forum.snapcraft.io/t/request-for-classic-confinement-sas2ircu/9023
+            # Snap interface for hw tooling is not supported
+            # in strict mode (confined). Classic mode
+            # (unconfined snaps) need approval
+            #
+            # hookenv.log('Installing from snap')
+            # status.waiting('Installing from snap')
+            # installed = install_tools(tools, method='snap')
+
+        if installed:
+            kv = unitdata.kv()
+            kv.set('tools', list(tools))
+            set_flag('hw-health.installed')
+            status.waiting('Preparing scripts installation')
+        else:
+            missing_tools = tools - SKIP_RESOURCES
+            status.blocked('Tools not found: {}'.format(', '.join(missing_tools)))
+>>>>>>> src/reactive/hw_health.py
 
 
 @hook('upgrade-charm')
diff --git a/src/tests/unit/test_actions.py b/src/tests/unit/test_actions.py
index 3df07a3..9cf5ab6 100644
--- a/src/tests/unit/test_actions.py
+++ b/src/tests/unit/test_actions.py
@@ -20,10 +20,10 @@ from actions.actions import clear_sel
 
 class ClearSelTestCase(unittest.TestCase):
 
+    @mock.patch('actions.actions.log')
     @mock.patch('subprocess.check_output')
     @mock.patch('subprocess.check_call')
-    @mock.patch('charmhelpers.core.hookenv.log')
-    def test_clear_sel(self, mock_log, mock_check_call, mock_subprocess):
+    def test_clear_sel(self, mock_check_call, mock_subprocess, mock_log):
         sel_output = "Unittest system event log output"
         mock_subprocess.return_value = sel_output
         mock_check_call.return_value = None
diff --git a/src/tests/unit/test_hwdiscovery.py b/src/tests/unit/test_hwdiscovery.py
index af9a18e..e352d9d 100644
--- a/src/tests/unit/test_hwdiscovery.py
+++ b/src/tests/unit/test_hwdiscovery.py
@@ -1,4 +1,4 @@
-import io
+import glob
 import os  # noqa: F401
 import subprocess  # noqa: F401
 import sys
@@ -7,8 +7,10 @@ import unittest.mock as mock
 
 sys.path.append('lib/hwhealth')
 import hwdiscovery  # noqa: E402
+from discovery.lshw import Hardware  # noqa: E402
 
 
+<<<<<<< src/tests/unit/test_hwdiscovery.py
 class TestGetTools(unittest.TestCase):
     def setUp(self):
         patchers = list()
@@ -83,9 +85,38 @@ class TestGetTools(unittest.TestCase):
             if value not in ('/sys/bus/pci/drivers/megaraid_sas/00*',
                              '/sys/bus/pci/drivers/mpt2sas/00*'):
                 return ['/dev/null']
+=======
+class TestHWDiscovery(unittest.TestCase):
+    @mock.patch('charmhelpers.core.hookenv.config')
+    @mock.patch('charmhelpers.core.hookenv.log')
+    @mock.patch('hwdiscovery._supports_mdadm')
+    @mock.patch('hwdiscovery.Hardware')
+    def test_get_tools(self, mock_hwinfo, supports_mdadm, mock_log, mock_config):
+        supports_mdadm.return_value = False
+        mock_config.return_value = False
+
+        TOOLS = {
+            'lshw.dell.01.json': set(['megacli', 'nvme']),
+            'lshw.dell.02.json': set(['nvme']),
+            'lshw.hp.json': set(['nvme', 'ssacli']),
+            'lshw.huawei.json': set(['hio_info', 'sas2ircu']),
+            'lshw.supermicro.notools.json': set([]),
+            'lshw.supermicro.nvme.json': set(['nvme']),
+            'lshw.supermicro.sas.01.json': set(['nvme', 'sas3ircu']),
+            'lshw.supermicro.sas.02.json': set(['nvme', 'sas3ircu']),
+        }
+
+        for filename in glob.glob(os.path.join(
+              os.getcwd(), 'tests/hw-health-samples/lshw.*.json')):
+            mock_hwinfo.return_value = Hardware(filename)
+            real = hwdiscovery.get_tools()
+            if os.path.basename(filename) in TOOLS:
+                self.assertEqual(real, TOOLS[os.path.basename(filename)])
+>>>>>>> src/tests/unit/test_hwdiscovery.py
             else:
-                return []
+                assert False
 
+<<<<<<< src/tests/unit/test_hwdiscovery.py
         driver.side_effect = side_effect_glob
         real = hwdiscovery.get_tools()
         expected = set([hwdiscovery.tools.Sas3Ircu()])
@@ -128,24 +159,20 @@ class TestDmiDecode(unittest.TestCase):
 class TestMdadmSupport(unittest.TestCase):
     @mock.patch('os.path.exists')
     @mock.patch('subprocess.Popen')
+=======
+>>>>>>> src/tests/unit/test_hwdiscovery.py
     @mock.patch('charmhelpers.core.hookenv.log')
-    def test_supports_mdadm_true(self, log, mdadm_details, path_exists):
-        class Test_Popen(object):
-            def __init__(cls):
-                data = b'ARRAY /dev/md0 metadata=1.2 name=node00:0'
-                data += b' UUID=dfaf7413:7551b000:56dd7442:5b020adb\n'
-                cls.stdout = io.BufferedReader(io.BytesIO(data))
-
+    @mock.patch('os.path.exists')
+    @mock.patch('subprocess.check_output')
+    def test_supports_mdadm_true(self, mdadm_details, path_exists, mock_log):
         path_exists.return_value = True
-        log.return_value = True
-        mdadm_details.return_value = Test_Popen()
-        real = hwdiscovery._supports_mdadm()
-        expected = True
-        self.assertEqual(real, expected)
+        mdadm_details.return_value = (
+            b'ARRAY /dev/md0 metadata=1.2 name=node00:0'
+            b' UUID=dfaf7413:7551b000:56dd7442:5b020adb\n'
+        )
+        self.assertEqual(hwdiscovery._supports_mdadm(), True)
 
     @mock.patch('os.path.exists')
     def test_supports_mdadm_false(self, path_exists):
         path_exists.return_value = False
-        real = hwdiscovery._supports_mdadm()
-        expected = False
-        self.assertEqual(real, expected)
+        self.assertEqual(hwdiscovery._supports_mdadm(), False)
diff --git a/src/tests/unit/test_tooling.py b/src/tests/unit/test_tooling.py
new file mode 100644
index 0000000..0c20173
--- /dev/null
+++ b/src/tests/unit/test_tooling.py
@@ -0,0 +1,139 @@
+<<<<<<< src/tests/unit/test_tooling.py
+=======
+import os  # noqa: F401
+import sys
+import unittest
+import unittest.mock as mock
+import zipfile
+
+
+sys.path.append('lib/hwhealth')
+import tooling  # noqa: E402
+
+
+class TestTooling(unittest.TestCase):
+    def test_install_resource_notools(self):
+        self.assertEqual(tooling.install_resource([], False), False)
+        self.assertEqual(
+            tooling.install_resource([], '/path/to/resource'), False)
+
+    @mock.patch('tooling.hookenv.log')
+    def test_install_resource_noresource_nomdadm(self, mock_log):
+        self.assertEqual(tooling.install_resource(['unittest'], False), False)
+
+    def test_install_resource_noresource_yesmdadm(self):
+        self.assertEqual(tooling.install_resource(['mdadm'], False), True)
+
+    @mock.patch('glob.glob')
+    @mock.patch('os.chmod')
+    @mock.patch('os.path.exists')
+    @mock.patch('os.path.isfile')
+    @mock.patch('shutil.chown')
+    @mock.patch('shutil.copy2')
+    @mock.patch('tempfile.TemporaryDirectory')
+    @mock.patch('zipfile.ZipFile')
+    def test_install_resource_ok(self, zfile, tmpdir, shutil_copy2,
+                                 shutil_chown, path_isfile, path_exists,
+                                 os_chmod, glob_glob):
+        class _WrapCls(object):
+            def __init__(cls):
+                cls.extractall = lambda x: None
+                cls.close = lambda: None
+
+        class Test_resource_zipfile(object):
+            def __enter__(cls):
+                return _WrapCls()
+
+            def __exit__(cls, type, value, traceback):
+                pass
+
+        class Test_resource_tmpdir(Test_resource_zipfile):
+            def __enter__(cls):
+                return ''
+
+        path_exists.return_value = True
+        zfile.return_value = Test_resource_zipfile()
+        tmpdir.return_value = Test_resource_tmpdir()
+        glob_glob.return_value = ['/tmp/test/megacli']
+        os_chmod.return_value = None
+        path_isfile.return_value = True
+        shutil_chown.return_value = None
+        shutil_copy2.return_value = None
+        actual = tooling.install_resource(['megacli'], '/tmp/test.zip')
+        expected = True
+        self.assertEqual(actual, expected)
+
+    @mock.patch('tooling.hookenv.log')
+    @mock.patch('os.path.exists')
+    @mock.patch('zipfile.ZipFile')
+    def test_install_resource_zipfile_error(self, zfile, path_exists, mock_log):
+        path_exists.return_value = True
+        zfile.side_effect = zipfile.BadZipFile('Intended error for unit test')
+        actual = tooling.install_resource(['megacli'], '/tmp/test.zip')
+        expected = False
+        self.assertEqual(actual, expected)
+
+    @mock.patch('tooling.hookenv.log')
+    @mock.patch('glob.glob')
+    @mock.patch('os.path.exists')
+    @mock.patch('os.path.isfile')
+    @mock.patch('tempfile.TemporaryDirectory')
+    @mock.patch('zipfile.ZipFile')
+    def test_install_resource_nomatchedresource(self, zfile, tmpdir,
+                                                path_isfile, path_exists,
+                                                glob_glob, mock_log):
+        class _WrapCls(object):
+            def __init__(cls):
+                cls.extractall = lambda x: None
+                cls.close = lambda: None
+
+        class Test_resource_zipfile(object):
+            def __enter__(cls):
+                return _WrapCls()
+
+            def __exit__(cls, type, value, traceback):
+                pass
+
+        class Test_resource_tmpdir(Test_resource_zipfile):
+            def __enter__(cls):
+                return ''
+
+        path_exists.return_value = True
+        zfile.return_value = Test_resource_zipfile()
+        tmpdir.return_value = Test_resource_tmpdir()
+        glob_glob.return_value = ['/tmp/test/megacli']
+        path_isfile.return_value = True
+        self.assertEqual(
+            tooling.install_resource(['unittest'], '/tmp/test.zip'), False)
+
+    def test_install_tools_notools(self):
+        self.assertEqual(tooling.install_tools([]), False)
+
+    def test_install_tools_unsupported_tools(self):
+        actual = tooling.install_tools(['unittest1', 'unittest2'])
+        expected = False
+        self.assertEqual(actual, expected)
+
+    def test_install_tools_supported_tools(self):
+        self.assertEqual(tooling.install_tools(['megacli', 'sas2ircu']), True)
+
+    def test_install_tools_mdadm(self):
+        self.assertEqual(tooling.install_tools(['mdadm']), True)
+
+    @mock.patch('os.environ')
+    @mock.patch('os.path.exists')
+    @mock.patch('os.path.join')
+    def test_get_filepath(self, path_join, path_exists, environ):
+        expected = 'unittest1'
+        environ.return_value = {'CHARM_DIR': 'xxx'}
+        path_join.return_value = expected
+        path_exists.return_value = True
+        self.assertEqual(tooling._get_filepath('asdfasdf'), expected)
+
+    def test_configure_tools_notools(self):
+        self.assertEqual(tooling.configure_tools([]), False)
+
+    def test_configure_tools_unsupported_tools(self):
+        self.assertEqual(
+            tooling.configure_tools(['unittest1', 'unittest2']), False)
+>>>>>>> src/tests/unit/test_tooling.py
diff --git a/src/tox.ini b/src/tox.ini
index 04b6de8..7cc6375 100644
--- a/src/tox.ini
+++ b/src/tox.ini
@@ -17,4 +17,3 @@ basepython = python3
 deps =
   flake8
 commands = flake8 {posargs} --max-complexity=20 --max-line-length=120 .
-