← Back to team overview

cloud-init-dev team mailing list archive

[Merge] ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial

 

Chad Smith has proposed merging ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial.

Commit message:
new-upstream snapshot for SRU into Xenial

Requested reviews:
  cloud-init Commiters (cloud-init-dev)
Related bugs:
  Bug #1844191 in cloud-init: "azure advanced networking sometimes triggers duplicate mac detection"
  https://bugs.launchpad.net/cloud-init/+bug/1844191

For more details, see:
https://code.launchpad.net/~chad.smith/cloud-init/+git/cloud-init/+merge/372891
-- 
Your team cloud-init Commiters is requested to review the proposed merge of ~chad.smith/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial.
diff --git a/.gitignore b/.gitignore
index 80c509e..b9b98e7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,14 @@ stage
 *.snap
 *.cover
 .idea/
+
+# Ignore packaging artifacts
+cloud-init.dsc
+cloud-init_*.build
+cloud-init_*.buildinfo
+cloud-init_*.changes
+cloud-init_*.deb
+cloud-init_*.dsc
+cloud-init_*.orig.tar.gz
+cloud-init_*.tar.xz
+cloud-init_*.upload
diff --git a/Makefile b/Makefile
index 4ace227..2c6d0c8 100644
--- a/Makefile
+++ b/Makefile
@@ -106,7 +106,9 @@ deb-src:
 		  echo sudo apt-get install devscripts; exit 1; }
 	$(PYVER) ./packages/bddeb -S -d
 
+doc:
+	tox -e doc
 
 .PHONY: test pyflakes pyflakes3 clean pep8 rpm srpm deb deb-src yaml
 .PHONY: check_version pip-test-requirements pip-requirements clean_pyc
-.PHONY: unittest unittest3 style-check
+.PHONY: unittest unittest3 style-check doc
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
index 587b994..1f61faa 100644
--- a/cloudinit/atomic_helper.py
+++ b/cloudinit/atomic_helper.py
@@ -1,11 +1,13 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import json
+import logging
 import os
 import stat
 import tempfile
 
 _DEF_PERMS = 0o644
+LOG = logging.getLogger(__name__)
 
 
 def write_file(filename, content, mode=_DEF_PERMS,
@@ -23,6 +25,10 @@ def write_file(filename, content, mode=_DEF_PERMS,
     try:
         tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
                                          delete=False, mode=omode)
+        LOG.debug(
+            "Atomically writing to file %s (via temporary file %s) - %s: [%o]"
+            " %d bytes/chars",
+            filename, tf.name, omode, mode, len(content))
         tf.write(content)
         tf.close()
         os.chmod(tf.name, mode)
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index ea707c0..5de5c6d 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -109,6 +109,127 @@ def is_bond(devname):
     return os.path.exists(sys_dev_path(devname, "bonding"))
 
 
+def has_master(devname):
+    return os.path.exists(sys_dev_path(devname, path="master"))
+
+
+def is_netfailover(devname, driver=None):
+    """ netfailover driver uses 3 nics, master, primary and standby.
+        this returns True if the device is either the primary or standby
+        as these devices are to be ignored.
+    """
+    if driver is None:
+        driver = device_driver(devname)
+    if is_netfail_primary(devname, driver) or is_netfail_standby(devname,
+                                                                 driver):
+        return True
+    return False
+
+
+def get_dev_features(devname):
+    """ Returns a str from reading /sys/class/net/<devname>/device/features."""
+    features = ''
+    try:
+        features = read_sys_net(devname, 'device/features')
+    except Exception:
+        pass
+    return features
+
+
+def has_netfail_standby_feature(devname):
+    """ Return True if VIRTIO_NET_F_STANDBY bit (62) is set.
+
+    https://github.com/torvalds/linux/blob/ \
+        089cf7f6ecb266b6a4164919a2e69bd2f938374a/ \
+        include/uapi/linux/virtio_net.h#L60
+    """
+    features = get_dev_features(devname)
+    if not features or len(features) < 64:
+        return False
+    return features[62] == "1"
+
+
+def is_netfail_master(devname, driver=None):
+    """ A device is a "netfail master" device if:
+
+        - The device does NOT have the 'master' sysfs attribute
+        - The device driver is 'virtio_net'
+        - The device has the standby feature bit set
+
+        Return True if all of the above is True.
+    """
+    if has_master(devname):
+        return False
+
+    if driver is None:
+        driver = device_driver(devname)
+
+    if driver != "virtio_net":
+        return False
+
+    if not has_netfail_standby_feature(devname):
+        return False
+
+    return True
+
+
+def is_netfail_primary(devname, driver=None):
+    """ A device is a "netfail primary" device if:
+
+        - the device has a 'master' sysfs file
+        - the device driver is not 'virtio_net'
+        - the 'master' sysfs file points to device with virtio_net driver
+        - the 'master' device has the 'standby' feature bit set
+
+        Return True if all of the above is True.
+    """
+    # /sys/class/net/<devname>/master -> ../../<master devname>
+    master_sysfs_path = sys_dev_path(devname, path='master')
+    if not os.path.exists(master_sysfs_path):
+        return False
+
+    if driver is None:
+        driver = device_driver(devname)
+
+    if driver == "virtio_net":
+        return False
+
+    master_devname = os.path.basename(os.path.realpath(master_sysfs_path))
+    master_driver = device_driver(master_devname)
+    if master_driver != "virtio_net":
+        return False
+
+    master_has_standby = has_netfail_standby_feature(master_devname)
+    if not master_has_standby:
+        return False
+
+    return True
+
+
+def is_netfail_standby(devname, driver=None):
+    """ A device is a "netfail standby" device if:
+
+        - The device has a 'master' sysfs attribute
+        - The device driver is 'virtio_net'
+        - The device has the standby feature bit set
+
+        Return True if all of the above is True.
+    """
+    if not has_master(devname):
+        return False
+
+    if driver is None:
+        driver = device_driver(devname)
+
+    if driver != "virtio_net":
+        return False
+
+    if not has_netfail_standby_feature(devname):
+        return False
+
+    return True
+
+
 def is_renamed(devname):
     """
     /* interface name assignment types (sysfs name_assign_type attribute) */
@@ -227,6 +348,9 @@ def find_fallback_nic(blacklist_drivers=None):
         if is_bond(interface):
             # skip any bonds
             continue
+        if is_netfailover(interface):
+            # ignore netfailover primary/standby interfaces
+            continue
         carrier = read_sys_net_int(interface, 'carrier')
         if carrier:
             connected.append(interface)
@@ -273,9 +397,14 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None):
     if not target_name:
         # can't read any interfaces addresses (or there are none); give up
         return None
-    target_mac = read_sys_net_safe(target_name, 'address')
-    cfg = {'dhcp4': True, 'set-name': target_name,
-           'match': {'macaddress': target_mac.lower()}}
+
+    # netfail cannot use mac for matching, they have duplicate macs
+    if is_netfail_master(target_name):
+        match = {'name': target_name}
+    else:
+        match = {
+            'macaddress': read_sys_net_safe(target_name, 'address').lower()}
+    cfg = {'dhcp4': True, 'set-name': target_name, 'match': match}
     if config_driver:
         driver = device_driver(target_name)
         if driver:
@@ -661,6 +790,10 @@ def get_interfaces():
             continue
         if is_bond(name):
             continue
+        if has_master(name):
+            continue
+        if is_netfailover(name):
+            continue
         mac = get_interface_mac(name)
         # some devices may not have a mac (tun0)
         if not mac:
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index d2e38f0..999db98 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -157,6 +157,12 @@ class TestReadSysNet(CiTestCase):
         ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
         self.assertTrue(net.is_bond('eth0'))
 
+    def test_has_master(self):
+        """has_master is True when /sys/net/devname/master exists."""
+        self.assertFalse(net.has_master('enP1s1'))
+        ensure_file(os.path.join(self.sysdir, 'enP1s1', 'master'))
+        self.assertTrue(net.has_master('enP1s1'))
+
     def test_is_vlan(self):
         """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
         ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent'))
@@ -204,6 +210,10 @@ class TestGenerateFallbackConfig(CiTestCase):
         self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
                        return_value=False)
         self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
+        self.add_patch('cloudinit.net.is_netfailover', 'm_netfail',
+                       return_value=False)
+        self.add_patch('cloudinit.net.is_netfail_master', 'm_netfail_master',
+                       return_value=False)
 
     def test_generate_fallback_finds_connected_eth_with_mac(self):
         """generate_fallback_config finds any connected device with a mac."""
@@ -268,6 +278,61 @@ class TestGenerateFallbackConfig(CiTestCase):
         ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
         self.assertIsNone(net.generate_fallback_config())
 
+    def test_generate_fallback_config_skips_netfail_devs(self):
+        """gen_fallback_config ignores netfail primary,sby no mac on master."""
+        mac = 'aa:bb:cc:aa:bb:cc'  # netfailover devs share the same mac
+        for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
+            write_file(os.path.join(self.sysdir, iface, 'carrier'), '1')
+            write_file(
+                os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
+            write_file(
+                os.path.join(self.sysdir, iface, 'address'), mac)
+
+        def is_netfail(iface, _driver=None):
+            # ens3 is the master
+            if iface == 'ens3':
+                return False
+            return True
+        self.m_netfail.side_effect = is_netfail
+
+        def is_netfail_master(iface, _driver=None):
+            # ens3 is the master
+            if iface == 'ens3':
+                return True
+            return False
+        self.m_netfail_master.side_effect = is_netfail_master
+        expected = {
+            'ethernets': {
+                'ens3': {'dhcp4': True, 'match': {'name': 'ens3'},
+                         'set-name': 'ens3'}},
+            'version': 2}
+        result = net.generate_fallback_config()
+        self.assertEqual(expected, result)
+
+
+class TestNetFindFallBackNic(CiTestCase):
+
+    with_logs = True
+
+    def setUp(self):
+        super(TestNetFindFallBackNic, self).setUp()
+        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+        self.m_sys_path = sys_mock.start()
+        self.sysdir = self.tmp_dir() + '/'
+        self.m_sys_path.return_value = self.sysdir
+        self.addCleanup(sys_mock.stop)
+        self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
+                       return_value=False)
+        self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
+
+    def test_find_fallback_nic_finds_first_connected_eth_with_mac(self):
+        """find_fallback_nic finds any connected device with a mac."""
+        write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+        write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
+        mac = 'aa:bb:cc:aa:bb:cc'
+        write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
+        self.assertEqual('eth1', net.find_fallback_nic())
+
 
 class TestGetDeviceList(CiTestCase):
 
@@ -365,6 +430,37 @@ class TestGetInterfaceMAC(CiTestCase):
         expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
         self.assertEqual(expected, net.get_interfaces())
 
+    def test_get_interfaces_by_mac_skips_master_devs(self):
+        """Ignore interfaces with a master device which would have dup mac."""
+        mac1 = mac2 = 'aa:bb:cc:aa:bb:cc'
+        write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
+        write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac1)
+        write_file(os.path.join(self.sysdir, 'eth1', 'master'), "blah")
+        write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
+        write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac2)
+        expected = [('eth2', mac2, None, None)]
+        self.assertEqual(expected, net.get_interfaces())
+
+    @mock.patch('cloudinit.net.is_netfailover')
+    def test_get_interfaces_by_mac_skips_netfailover(self, m_netfail):
+        """Ignore interfaces if netfailover primary or standby."""
+        mac = 'aa:bb:cc:aa:bb:cc'  # netfailover devs share the same mac
+        for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
+            write_file(
+                os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
+            write_file(
+                os.path.join(self.sysdir, iface, 'address'), mac)
+
+        def is_netfail(iface, _driver=None):
+            # ens3 is the master
+            if iface == 'ens3':
+                return False
+            else:
+                return True
+        m_netfail.side_effect = is_netfail
+        expected = [('ens3', mac, None, None)]
+        self.assertEqual(expected, net.get_interfaces())
+
 
 class TestInterfaceHasOwnMAC(CiTestCase):
 
@@ -922,3 +1018,234 @@ class TestWaitForPhysdevs(CiTestCase):
         self.m_get_iface_mac.return_value = {}
         net.wait_for_physdevs(netcfg, strict=False)
         self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count)
+
+
+class TestNetFailOver(CiTestCase):
+
+    with_logs = True
+
+    def setUp(self):
+        super(TestNetFailOver, self).setUp()
+        self.add_patch('cloudinit.net.util', 'm_util')
+        self.add_patch('cloudinit.net.read_sys_net', 'm_read_sys_net')
+        self.add_patch('cloudinit.net.device_driver', 'm_device_driver')
+
+    def test_get_dev_features(self):
+        devname = self.random_string()
+        features = self.random_string()
+        self.m_read_sys_net.return_value = features
+
+        self.assertEqual(features, net.get_dev_features(devname))
+        self.assertEqual(1, self.m_read_sys_net.call_count)
+        self.assertEqual(mock.call(devname, 'device/features'),
+                         self.m_read_sys_net.call_args_list[0])
+
+    def test_get_dev_features_none_returns_empty_string(self):
+        devname = self.random_string()
+        self.m_read_sys_net.side_effect = Exception('error')
+        self.assertEqual('', net.get_dev_features(devname))
+        self.assertEqual(1, self.m_read_sys_net.call_count)
+        self.assertEqual(mock.call(devname, 'device/features'),
+                         self.m_read_sys_net.call_args_list[0])
+
+    @mock.patch('cloudinit.net.get_dev_features')
+    def test_has_netfail_standby_feature(self, m_dev_features):
+        devname = self.random_string()
+        standby_features = ('0' * 62) + '1' + '0'
+        m_dev_features.return_value = standby_features
+        self.assertTrue(net.has_netfail_standby_feature(devname))
+
+    @mock.patch('cloudinit.net.get_dev_features')
+    def test_has_netfail_standby_feature_short_is_false(self, m_dev_features):
+        devname = self.random_string()
+        standby_features = self.random_string()
+        m_dev_features.return_value = standby_features
+        self.assertFalse(net.has_netfail_standby_feature(devname))
+
+    @mock.patch('cloudinit.net.get_dev_features')
+    def test_has_netfail_standby_feature_not_present_is_false(self,
+                                                              m_dev_features):
+        devname = self.random_string()
+        standby_features = '0' * 64
+        m_dev_features.return_value = standby_features
+        self.assertFalse(net.has_netfail_standby_feature(devname))
+
+    @mock.patch('cloudinit.net.get_dev_features')
+    def test_has_netfail_standby_feature_no_features_is_false(self,
+                                                              m_dev_features):
+        devname = self.random_string()
+        standby_features = None
+        m_dev_features.return_value = standby_features
+        self.assertFalse(net.has_netfail_standby_feature(devname))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    def test_is_netfail_master(self, m_exists, m_standby):
+        devname = self.random_string()
+        driver = 'virtio_net'
+        m_exists.return_value = False  # no master sysfs attr
+        m_standby.return_value = True  # has standby feature flag
+        self.assertTrue(net.is_netfail_master(devname, driver))
+
+    @mock.patch('cloudinit.net.sys_dev_path')
+    def test_is_netfail_master_checks_master_attr(self, m_sysdev):
+        devname = self.random_string()
+        driver = 'virtio_net'
+        m_sysdev.return_value = self.random_string()
+        self.assertFalse(net.is_netfail_master(devname, driver))
+        self.assertEqual(1, m_sysdev.call_count)
+        self.assertEqual(mock.call(devname, path='master'),
+                         m_sysdev.call_args_list[0])
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    def test_is_netfail_master_wrong_driver(self, m_exists, m_standby):
+        devname = self.random_string()
+        driver = self.random_string()
+        self.assertFalse(net.is_netfail_master(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    def test_is_netfail_master_has_master_attr(self, m_exists, m_standby):
+        devname = self.random_string()
+        driver = 'virtio_net'
+        m_exists.return_value = True  # has master sysfs attr
+        self.assertFalse(net.is_netfail_master(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby):
+        devname = self.random_string()
+        driver = 'virtio_net'
+        m_exists.return_value = False  # no master sysfs attr
+        m_standby.return_value = False  # no standby feature flag
+        self.assertFalse(net.is_netfail_master(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    @mock.patch('cloudinit.net.sys_dev_path')
+    def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby):
+        devname = self.random_string()
+        driver = self.random_string()  # device not virtio_net
+        master_devname = self.random_string()
+        m_sysdev.return_value = "%s/%s" % (self.random_string(),
+                                           master_devname)
+        m_exists.return_value = True  # has master sysfs attr
+        self.m_device_driver.return_value = 'virtio_net'  # master virtio_net
+        m_standby.return_value = True  # has standby feature flag
+        self.assertTrue(net.is_netfail_primary(devname, driver))
+        self.assertEqual(1, self.m_device_driver.call_count)
+        self.assertEqual(mock.call(master_devname),
+                         self.m_device_driver.call_args_list[0])
+        self.assertEqual(1, m_standby.call_count)
+        self.assertEqual(mock.call(master_devname),
+                         m_standby.call_args_list[0])
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    @mock.patch('cloudinit.net.sys_dev_path')
+    def test_is_netfail_primary_wrong_driver(self, m_sysdev, m_exists,
+                                             m_standby):
+        devname = self.random_string()
+        driver = 'virtio_net'
+        self.assertFalse(net.is_netfail_primary(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    @mock.patch('cloudinit.net.sys_dev_path')
+    def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby):
+        devname = self.random_string()
+        driver = self.random_string()  # device not virtio_net
+        m_exists.return_value = False  # no master sysfs attr
+        self.assertFalse(net.is_netfail_primary(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    @mock.patch('cloudinit.net.sys_dev_path')
+    def test_is_netfail_primary_bad_master(self, m_sysdev, m_exists,
+                                           m_standby):
+        devname = self.random_string()
+        driver = self.random_string()  # device not virtio_net
+        master_devname = self.random_string()
+        m_sysdev.return_value = "%s/%s" % (self.random_string(),
+                                           master_devname)
+        m_exists.return_value = True  # has master sysfs attr
+        self.m_device_driver.return_value = 'XXXX'  # master not virtio_net
+        self.assertFalse(net.is_netfail_primary(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    @mock.patch('cloudinit.net.sys_dev_path')
+    def test_is_netfail_primary_no_standby(self, m_sysdev, m_exists,
+                                           m_standby):
+        devname = self.random_string()
+        driver = self.random_string()  # device not virtio_net
+        master_devname = self.random_string()
+        m_sysdev.return_value = "%s/%s" % (self.random_string(),
+                                           master_devname)
+        m_exists.return_value = True  # has master sysfs attr
+        self.m_device_driver.return_value = 'virtio_net'  # master virtio_net
+        m_standby.return_value = False  # master has no standby feature flag
+        self.assertFalse(net.is_netfail_primary(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    def test_is_netfail_standby(self, m_exists, m_standby):
+        devname = self.random_string()
+        driver = 'virtio_net'
+        m_exists.return_value = True  # has master sysfs attr
+        m_standby.return_value = True  # has standby feature flag
+        self.assertTrue(net.is_netfail_standby(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby):
+        devname = self.random_string()
+        driver = self.random_string()
+        self.assertFalse(net.is_netfail_standby(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    def test_is_netfail_standby_no_master(self, m_exists, m_standby):
+        devname = self.random_string()
+        driver = 'virtio_net'
+        m_exists.return_value = False  # no master sysfs attr
+        self.assertFalse(net.is_netfail_standby(devname, driver))
+
+    @mock.patch('cloudinit.net.has_netfail_standby_feature')
+    @mock.patch('cloudinit.net.os.path.exists')
+    def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby):
+        devname = self.random_string()
+        driver = 'virtio_net'
+        m_exists.return_value = True  # has master sysfs attr
+        m_standby.return_value = False  # no standby feature flag
+        self.assertFalse(net.is_netfail_standby(devname, driver))
+
+    @mock.patch('cloudinit.net.is_netfail_standby')
+    @mock.patch('cloudinit.net.is_netfail_primary')
+    def test_is_netfailover_primary(self, m_primary, m_standby):
+        devname = self.random_string()
+        driver = self.random_string()
+        m_primary.return_value = True
+        m_standby.return_value = False
+        self.assertTrue(net.is_netfailover(devname, driver))
+
+    @mock.patch('cloudinit.net.is_netfail_standby')
+    @mock.patch('cloudinit.net.is_netfail_primary')
+    def test_is_netfailover_standby(self, m_primary, m_standby):
+        devname = self.random_string()
+        driver = self.random_string()
+        m_primary.return_value = False
+        m_standby.return_value = True
+        self.assertTrue(net.is_netfailover(devname, driver))
+
+    @mock.patch('cloudinit.net.is_netfail_standby')
+    @mock.patch('cloudinit.net.is_netfail_primary')
+    def test_is_netfailover_returns_false(self, m_primary, m_standby):
+        devname = self.random_string()
+        driver = self.random_string()
+        m_primary.return_value = False
+        m_standby.return_value = False
+        self.assertFalse(net.is_netfailover(devname, driver))
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 5c017bf..1010745 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -473,7 +473,7 @@ def identify_aws(data):
 
 
 def identify_brightbox(data):
-    if data['serial'].endswith('brightbox.com'):
+    if data['serial'].endswith('.brightbox.com'):
         return CloudNames.BRIGHTBOX
 
 
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index dd941d2..b156189 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -40,11 +40,15 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state \
 from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
     enable_nics,
     get_nics_to_enable,
-    set_customization_status
+    set_customization_status,
+    get_tools_config
 )
 
 LOG = logging.getLogger(__name__)
 
+CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
+GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
+
 
 class DataSourceOVF(sources.DataSource):
 
@@ -148,6 +152,21 @@ class DataSourceOVF(sources.DataSource):
                     product_marker, os.path.join(self.paths.cloud_dir, 'data'))
                 special_customization = product_marker and not hasmarkerfile
                 customscript = self._vmware_cust_conf.custom_script_name
+                custScriptConfig = get_tools_config(
+                    CONFGROUPNAME_GUESTCUSTOMIZATION,
+                    GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
+                    "true")
+                if custScriptConfig.lower() == "false":
+                    # Update the customization status if the custom
+                    # script is disabled
+                    if special_customization and customscript:
+                        msg = "Custom script is disabled by VM Administrator"
+                        LOG.debug(msg)
+                        set_customization_status(
+                            GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+                            GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
+                        raise RuntimeError(msg)
+
                 ccScriptsDir = os.path.join(
                     self.paths.get_cpath("scripts"),
                     "per-instance")
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index 1cb0636..eec8740 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -16,7 +16,7 @@ Notes:
 """
 
 from cloudinit.url_helper import combine_url, readurl, UrlError
-from cloudinit.net import dhcp, get_interfaces_by_mac
+from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master
 from cloudinit import net
 from cloudinit import sources
 from cloudinit import util
@@ -108,6 +108,56 @@ def _add_network_config_from_opc_imds(network_config):
                 'match': {'macaddress': mac_address}}
 
 
+def _ensure_netfailover_safe(network_config):
+    """
+    Search network config physical interfaces to see if any of them are
+    a netfailover master.  If found, we prevent matching by MAC as the other
+    failover devices have the same MAC but need to be ignored.
+
+    Note: we rely on cloudinit.net changes which prevent netfailover devices
+    from being present in the provided network config.  For more details about
+    netfailover devices, refer to cloudinit.net module.
+
+    :param network_config
+       A v1 or v2 network config dict with the primary NIC, and possibly
+       secondary nic configured.  This dict will be mutated.
+
+    """
+    # ignore anything that's not an actual network-config
+    if 'version' not in network_config:
+        return
+
+    if network_config['version'] not in [1, 2]:
+        LOG.debug('Ignoring unknown network config version: %s',
+                  network_config['version'])
+        return
+
+    mac_to_name = get_interfaces_by_mac()
+    if network_config['version'] == 1:
+        for cfg in [c for c in network_config['config'] if 'type' in c]:
+            if cfg['type'] == 'physical':
+                if 'mac_address' in cfg:
+                    mac = cfg['mac_address']
+                    cur_name = mac_to_name.get(mac)
+                    if not cur_name:
+                        continue
+                    elif is_netfail_master(cur_name):
+                        del cfg['mac_address']
+
+    elif network_config['version'] == 2:
+        for _, cfg in network_config.get('ethernets', {}).items():
+            if 'match' in cfg:
+                macaddr = cfg.get('match', {}).get('macaddress')
+                if macaddr:
+                    cur_name = mac_to_name.get(macaddr)
+                    if not cur_name:
+                        continue
+                    elif is_netfail_master(cur_name):
+                        del cfg['match']['macaddress']
+                        del cfg['set-name']
+                        cfg['match']['name'] = cur_name
+
+
 class DataSourceOracle(sources.DataSource):
 
     dsname = 'Oracle'
@@ -208,9 +258,13 @@ class DataSourceOracle(sources.DataSource):
         We nonetheless return cmdline provided config if present
         and fallback to generate fallback."""
         if self._network_config == sources.UNSET:
+            # this is v1
             self._network_config = cmdline.read_initramfs_config()
+
             if not self._network_config:
+                # this is now v2
                 self._network_config = self.distro.generate_fallback_config()
+
             if self.ds_cfg.get('configure_secondary_nics'):
                 try:
                     # Mutate self._network_config to include secondary VNICs
@@ -219,6 +273,12 @@ class DataSourceOracle(sources.DataSource):
                     util.logexc(
                         LOG,
                         "Failed to fetch secondary network configuration!")
+
+            # we need to verify that the nic selected is not a netfail over
+            # device and, if it is a netfail master, then we need to avoid
+            # emitting any match by mac
+            _ensure_netfailover_safe(self._network_config)
+
         return self._network_config
 
 
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index db5a00d..65ae739 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -10,5 +10,6 @@ class GuestCustErrorEnum(object):
     """Specifies different errors of Guest Customization engine"""
 
     GUESTCUST_ERROR_SUCCESS = 0
+    GUESTCUST_ERROR_SCRIPT_DISABLED = 6
 
 # vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index a590f32..eb78172 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -7,6 +7,7 @@
 
 import logging
 import os
+import re
 import time
 
 from cloudinit import util
@@ -117,4 +118,40 @@ def enable_nics(nics):
     logger.warning("Can't connect network interfaces after %d attempts",
                    enableNicsWaitRetries)
 
+
+def get_tools_config(section, key, defaultVal):
+    """ Return the value of [section] key from VMTools configuration.
+
+        @param section: String of section to read from VMTools config
+        @returns: String value from key in [section] or defaultVal if
+                  [section] is not present or vmware-toolbox-cmd is
+                  not installed.
+    """
+
+    if not util.which('vmware-toolbox-cmd'):
+        logger.debug(
+            'vmware-toolbox-cmd not installed, returning default value')
+        return defaultVal
+
+    retValue = defaultVal
+    cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
+
+    try:
+        (outText, _) = util.subp(cmd)
+        m = re.match(r'([a-zA-Z0-9 ]+)=(.*)', outText)
+        if m:
+            retValue = m.group(2).strip()
+            logger.debug("Get tools config: [%s] %s = %s",
+                         section, key, retValue)
+        else:
+            logger.debug(
+                "Tools config: [%s] %s is not found, return default value: %s",
+                section, key, retValue)
+    except util.ProcessExecutionError as e:
+        logger.error("Failed running %s[%s]", cmd, e.exit_code)
+        logger.exception(e)
+
+    return retValue
+
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index 2a70bbc..85b6db9 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -8,6 +8,7 @@ from cloudinit.tests import helpers as test_helpers
 
 from textwrap import dedent
 import argparse
+import copy
 import httpretty
 import json
 import mock
@@ -586,4 +587,150 @@ class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase):
         self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0])
 
 
+class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
+
+    with_logs = True
+
+    def setUp(self):
+        super(TestNetworkConfigFiltersNetFailover, self).setUp()
+        self.add_patch(DS_PATH + '.get_interfaces_by_mac',
+                       'm_get_interfaces_by_mac')
+        self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master')
+
+    def test_ignore_bogus_network_config(self):
+        netcfg = {'something': 'here'}
+        passed_netcfg = copy.copy(netcfg)
+        oracle._ensure_netfailover_safe(passed_netcfg)
+        self.assertEqual(netcfg, passed_netcfg)
+
+    def test_ignore_network_config_unknown_versions(self):
+        netcfg = {'something': 'here', 'version': 3}
+        passed_netcfg = copy.copy(netcfg)
+        oracle._ensure_netfailover_safe(passed_netcfg)
+        self.assertEqual(netcfg, passed_netcfg)
+
+    def test_checks_v1_type_physical_interfaces(self):
+        mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+        self.m_get_interfaces_by_mac.return_value = {
+            mac_addr: nic_name,
+        }
+        netcfg = {'version': 1, 'config': [
+            {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr,
+             'subnets': [{'type': 'dhcp4'}]}]}
+        passed_netcfg = copy.copy(netcfg)
+        self.m_netfail_master.return_value = False
+        oracle._ensure_netfailover_safe(passed_netcfg)
+        self.assertEqual(netcfg, passed_netcfg)
+        self.assertEqual([mock.call(nic_name)],
+                         self.m_netfail_master.call_args_list)
+
+    def test_checks_v1_skips_non_phys_interfaces(self):
+        mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0'
+        self.m_get_interfaces_by_mac.return_value = {
+            mac_addr: nic_name,
+        }
+        netcfg = {'version': 1, 'config': [
+            {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr,
+             'subnets': [{'type': 'dhcp4'}]}]}
+        passed_netcfg = copy.copy(netcfg)
+        oracle._ensure_netfailover_safe(passed_netcfg)
+        self.assertEqual(netcfg, passed_netcfg)
+        self.assertEqual(0, self.m_netfail_master.call_count)
+
+    def test_removes_master_mac_property_v1(self):
+        nic_master, mac_master = 'ens3', self.random_string()
+        nic_other, mac_other = 'ens7', self.random_string()
+        nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+        self.m_get_interfaces_by_mac.return_value = {
+            mac_master: nic_master,
+            mac_other: nic_other,
+            mac_extra: nic_extra,
+        }
+        netcfg = {'version': 1, 'config': [
+            {'type': 'physical', 'name': nic_master,
+             'mac_address': mac_master},
+            {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
+            {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
+        ]}
+
+        def _is_netfail_master(iface):
+            if iface == 'ens3':
+                return True
+            return False
+        self.m_netfail_master.side_effect = _is_netfail_master
+        expected_cfg = {'version': 1, 'config': [
+            {'type': 'physical', 'name': nic_master},
+            {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
+            {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
+        ]}
+        oracle._ensure_netfailover_safe(netcfg)
+        self.assertEqual(expected_cfg, netcfg)
+
+    def test_checks_v2_type_ethernet_interfaces(self):
+        mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+        self.m_get_interfaces_by_mac.return_value = {
+            mac_addr: nic_name,
+        }
+        netcfg = {'version': 2, 'ethernets': {
+            nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
+                       'match': {'macaddress': mac_addr}}}}
+        passed_netcfg = copy.copy(netcfg)
+        self.m_netfail_master.return_value = False
+        oracle._ensure_netfailover_safe(passed_netcfg)
+        self.assertEqual(netcfg, passed_netcfg)
+        self.assertEqual([mock.call(nic_name)],
+                         self.m_netfail_master.call_args_list)
+
+    def test_skips_v2_non_ethernet_interfaces(self):
+        mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0'
+        self.m_get_interfaces_by_mac.return_value = {
+            mac_addr: nic_name,
+        }
+        netcfg = {'version': 2, 'wifis': {
+            nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
+                       'match': {'macaddress': mac_addr}}}}
+        passed_netcfg = copy.copy(netcfg)
+        oracle._ensure_netfailover_safe(passed_netcfg)
+        self.assertEqual(netcfg, passed_netcfg)
+        self.assertEqual(0, self.m_netfail_master.call_count)
+
+    def test_removes_master_mac_property_v2(self):
+        nic_master, mac_master = 'ens3', self.random_string()
+        nic_other, mac_other = 'ens7', self.random_string()
+        nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+        self.m_get_interfaces_by_mac.return_value = {
+            mac_master: nic_master,
+            mac_other: nic_other,
+            mac_extra: nic_extra,
+        }
+        netcfg = {'version': 2, 'ethernets': {
+            nic_extra: {'dhcp4': True, 'set-name': nic_extra,
+                        'match': {'macaddress': mac_extra}},
+            nic_other: {'dhcp4': True, 'set-name': nic_other,
+                        'match': {'macaddress': mac_other}},
+            nic_master: {'dhcp4': True, 'set-name': nic_master,
+                         'match': {'macaddress': mac_master}},
+        }}
+
+        def _is_netfail_master(iface):
+            if iface == 'ens3':
+                return True
+            return False
+        self.m_netfail_master.side_effect = _is_netfail_master
+
+        expected_cfg = {'version': 2, 'ethernets': {
+            nic_master: {'dhcp4': True, 'match': {'name': nic_master}},
+            nic_extra: {'dhcp4': True, 'set-name': nic_extra,
+                        'match': {'macaddress': mac_extra}},
+            nic_other: {'dhcp4': True, 'set-name': nic_other,
+                        'match': {'macaddress': mac_other}},
+        }}
+        oracle._ensure_netfailover_safe(netcfg)
+        # the netfail master must lose its mac match (and set-name) and
+        # instead be matched by interface name only
+        master_cfg = netcfg['ethernets'][nic_master]
+        self.assertEqual({'name': nic_master}, master_cfg['match'])
+        self.assertEqual(expected_cfg, netcfg)
+
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 23fddd0..4dad2af 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -6,7 +6,9 @@ import functools
 import httpretty
 import logging
 import os
+import random
 import shutil
+import string
 import sys
 import tempfile
 import time
@@ -243,6 +245,12 @@ class CiTestCase(TestCase):
             myds.metadata.update(metadata)
         return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
 
+    @classmethod
+    def random_string(cls, length=8):
+        """ return a random lowercase string with default length of 8"""
+        return ''.join(
+            random.choice(string.ascii_lowercase) for _ in range(length))
+
 
 class ResourceUsingTestCase(CiTestCase):
 
diff --git a/debian/changelog b/debian/changelog
index 93c1751..5b03045 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,23 @@
+cloud-init (19.2-36-g059d049c-0ubuntu1~16.04.1) xenial; urgency=medium
+
+  * New upstream snapshot. (LP: #1844334)
+    - net: add is_master check for filtering device list
+    - docs: more complete list of availability [Joshua Powers]
+    - docs: start FAQ page [Joshua Powers]
+    - docs: cleanup output & order of datasource page [Joshua Powers]
+    - Brightbox: restrict detection to require full domain match
+      .brightbox.com [Scott Moser]
+    - VMWware: add option into VMTools config to enable/disable custom script.
+      [Xiaofeng Wang]
+    - net,Oracle: Add support for netfailover detection
+    - atomic_helper: add DEBUG logging to write_file
+    - doc: document doc, create makefile and tox target [Joshua Powers]
+    - .gitignore: ignore files produced by package builds
+    - docs: fix whitespace, spelling, and line length [Joshua Powers]
+    - docs: remove unnecessary file in doc directory [Joshua Powers]
+
+ -- Chad Smith <chad.smith@xxxxxxxxxxxxx>  Tue, 17 Sep 2019 08:18:06 -0600
+
 cloud-init (19.2-24-ge7881d5c-0ubuntu1~16.04.1) xenial; urgency=medium
 
   * New upstream snapshot. (LP: #1841099)
diff --git a/doc/README b/doc/README
deleted file mode 100644
index 8355919..0000000
--- a/doc/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This project is cloud-init it is hosted on launchpad at 
-https://launchpad.net/cloud-init
-
-The package was previously named ec2-init.
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 4174477..9b27484 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -17,7 +17,8 @@ from cloudinit.config.schema import get_schema_doc
 # ]
 
 # General information about the project.
-project = 'Cloud-Init'
+project = 'cloud-init'
+copyright = '2019, Canonical Ltd.'
 
 # -- General configuration ----------------------------------------------------
 
@@ -59,15 +60,7 @@ show_authors = False
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-html_theme_options = {
-    "bodyfont": "Ubuntu, Arial, sans-serif",
-    "headfont": "Ubuntu, Arial, sans-serif"
-}
+html_theme = 'sphinx_rtd_theme'
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 20a99a3..c670b20 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -1,14 +1,5 @@
 .. _index:
 
-.. http://thomas-cokelaer.info/tutorials/sphinx/rest_syntax.html
-.. As suggested at link above for headings use:
-..   # with overline, for parts
-..   * with overline, for chapters
-..   =, for sections
-..   -, for subsections
-..   ^, for subsubsections
-..   “, for paragraphs
-
 #############
 Documentation
 #############
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index ef5ae7b..3f215b1 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -1,21 +1,64 @@
-************
+.. _availability:
+
 Availability
 ************
 
-It is currently installed in the `Ubuntu Cloud Images`_ and also in the official `Ubuntu`_ images available on EC2, Azure, GCE and many other clouds.
+Below outlines the current availability of cloud-init across
+distributions and clouds, both public and private.
+
+.. note::
+
+    If a distribution or cloud does not show up in the list below contact
+    them and ask for images to be generated using cloud-init!
 
-Versions for other systems can be (or have been) created for the following distributions:
+Distributions
+=============
+
+Cloud-init has support across all major Linux distributions and
+FreeBSD:
 
 - Ubuntu
+- SLES/openSUSE
+- RHEL/CentOS
 - Fedora
+- Gentoo Linux
 - Debian
-- RHEL
-- CentOS
-- *and more...*
+- ArchLinux
+- FreeBSD
+
+Clouds
+======
+
+Cloud-init provides support across a wide ranging list of execution
+environments in the public cloud:
+
+- Amazon Web Services
+- Microsoft Azure
+- Google Cloud Platform
+- Oracle Cloud Infrastructure
+- Softlayer
+- Rackspace Public Cloud
+- IBM Cloud
+- Digital Ocean
+- Bigstep
+- Hetzner
+- Joyent
+- CloudSigma
+- Alibaba Cloud
+- OVH
+- OpenNebula
+- Exoscale
+- Scaleway
+- CloudStack
+- AltCloud
+- SmartOS
 
-So ask your distribution provider where you can obtain an image with it built-in if one is not already available ☺
+Additionally, cloud-init is supported on these private clouds:
 
+- Bare metal installs
+- OpenStack
+- LXD
+- KVM
+- Metal-as-a-Service (MAAS)
 
-.. _Ubuntu Cloud Images: http://cloud-images.ubuntu.com/
-.. _Ubuntu: http://www.ubuntu.com/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 2148cd5..8e58be9 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -1,89 +1,57 @@
 .. _datasources:
 
-***********
 Datasources
 ***********
 
-What is a datasource?
-=====================
-
 Datasources are sources of configuration data for cloud-init that typically
-come from the user (aka userdata) or come from the stack that created the
-configuration drive (aka metadata). Typical userdata would include files,
+come from the user (e.g. userdata) or come from the cloud that created the
+configuration drive (e.g. metadata). Typical userdata would include files,
 yaml, and shell scripts while typical metadata would include server name,
-instance id, display name and other cloud specific details. Since there are
-multiple ways to provide this data (each cloud solution seems to prefer its
-own way) internally a datasource abstract class was created to allow for a
-single way to access the different cloud systems methods to provide this data
-through the typical usage of subclasses.
-
-Any metadata processed by cloud-init's datasources is persisted as
-``/run/cloud-init/instance-data.json``. Cloud-init provides tooling
-to quickly introspect some of that data. See :ref:`instance_metadata` for
-more information.
-
-
-Datasource API
---------------
-The current interface that a datasource object must provide is the following:
-
-.. sourcecode:: python
+instance id, display name and other cloud specific details.
 
-    # returns a mime multipart message that contains
-    # all the various fully-expanded components that
-    # were found from processing the raw userdata string
-    # - when filtering only the mime messages targeting
-    #   this instance id will be returned (or messages with
-    #   no instance id)
-    def get_userdata(self, apply_filter=False)
-
-    # returns the raw userdata string (or none)
-    def get_userdata_raw(self)
+Since there are multiple ways to provide this data (each cloud solution seems
+to prefer its own way) internally a datasource abstract class was created to
+allow for a single way to access the different cloud systems methods to provide
+this data through the typical usage of subclasses.
 
-    # returns a integer (or none) which can be used to identify
-    # this instance in a group of instances which are typically
-    # created from a single command, thus allowing programmatic
-    # filtering on this launch index (or other selective actions)
-    @property
-    def launch_index(self)
-
-    # the data sources' config_obj is a cloud-config formatted
-    # object that came to it from ways other than cloud-config
-    # because cloud-config content would be handled elsewhere
-    def get_config_obj(self)
-
-    #returns a list of public ssh keys
-    def get_public_ssh_keys(self)
-
-    # translates a device 'short' name into the actual physical device
-    # fully qualified name (or none if said physical device is not attached
-    # or does not exist)
-    def device_name_to_device(self, name)
+Any metadata processed by cloud-init's datasources is persisted as
+``/run/cloud-init/instance-data.json``. Cloud-init provides tooling to quickly
+introspect some of that data. See :ref:`instance_metadata` for more
+information.
 
-    # gets the locale string this instance should be applying
-    # which typically used to adjust the instances locale settings files
-    def get_locale(self)
+Known Sources
+=============
 
-    @property
-    def availability_zone(self)
+The following is a list of documents for each supported datasource:
 
-    # gets the instance id that was assigned to this instance by the
-    # cloud provider or when said instance id does not exist in the backing
-    # metadata this will return 'iid-datasource'
-    def get_instance_id(self)
+.. toctree::
+   :titlesonly:
 
-    # gets the fully qualified domain name that this host should  be using
-    # when configuring network or hostname releated settings, typically
-    # assigned either by the cloud provider or the user creating the vm
-    def get_hostname(self, fqdn=False)
+   datasources/aliyun.rst
+   datasources/altcloud.rst
+   datasources/ec2.rst
+   datasources/azure.rst
+   datasources/cloudsigma.rst
+   datasources/cloudstack.rst
+   datasources/configdrive.rst
+   datasources/digitalocean.rst
+   datasources/exoscale.rst
+   datasources/fallback.rst
+   datasources/gce.rst
+   datasources/maas.rst
+   datasources/nocloud.rst
+   datasources/opennebula.rst
+   datasources/openstack.rst
+   datasources/oracle.rst
+   datasources/ovf.rst
+   datasources/smartos.rst
 
-    def get_package_mirror_info(self)
 
+Creation
+========
 
-Adding a new Datasource
------------------------
 The datasource objects have a few touch points with cloud-init.  If you
-are interested in adding a new datasource for your cloud platform you'll
+are interested in adding a new datasource for your cloud platform you will
 need to take care of the following items:
 
 * **Identify a mechanism for positive identification of the platform**:
@@ -139,31 +107,61 @@ need to take care of the following items:
   file in ``doc/datasources/<cloudplatform>.rst``
 
 
-Datasource Documentation
-========================
-The following is a list of the implemented datasources.
-Follow for more information.
+API
+===
 
-.. toctree::
-   :maxdepth: 2
+The current interface that a datasource object must provide is the following:
 
-   datasources/aliyun.rst
-   datasources/altcloud.rst
-   datasources/azure.rst
-   datasources/cloudsigma.rst
-   datasources/cloudstack.rst
-   datasources/configdrive.rst
-   datasources/digitalocean.rst
-   datasources/ec2.rst
-   datasources/exoscale.rst
-   datasources/maas.rst
-   datasources/nocloud.rst
-   datasources/opennebula.rst
-   datasources/openstack.rst
-   datasources/oracle.rst
-   datasources/ovf.rst
-   datasources/smartos.rst
-   datasources/fallback.rst
-   datasources/gce.rst
+.. sourcecode:: python
+
+    # returns a mime multipart message that contains
+    # all the various fully-expanded components that
+    # were found from processing the raw user data string
+    # - when filtering only the mime messages targeting
+    #   this instance id will be returned (or messages with
+    #   no instance id)
+    def get_userdata(self, apply_filter=False)
+
+    # returns the raw userdata string (or none)
+    def get_userdata_raw(self)
+
+    # returns a integer (or none) which can be used to identify
+    # this instance in a group of instances which are typically
+    # created from a single command, thus allowing programmatic
+    # filtering on this launch index (or other selective actions)
+    @property
+    def launch_index(self)
+
+    # the data sources' config_obj is a cloud-config formatted
+    # object that came to it from ways other than cloud-config
+    # because cloud-config content would be handled elsewhere
+    def get_config_obj(self)
+
+    #returns a list of public ssh keys
+    def get_public_ssh_keys(self)
+
+    # translates a device 'short' name into the actual physical device
+    # fully qualified name (or none if said physical device is not attached
+    # or does not exist)
+    def device_name_to_device(self, name)
+
+    # gets the locale string this instance should be applying
+    # which typically used to adjust the instances locale settings files
+    def get_locale(self)
+
+    @property
+    def availability_zone(self)
+
+    # gets the instance id that was assigned to this instance by the
+    # cloud provider or when said instance id does not exist in the backing
+    # metadata this will return 'iid-datasource'
+    def get_instance_id(self)
+
+    # gets the fully qualified domain name that this host should  be using
+    # when configuring network or hostname related settings, typically
+    # assigned either by the cloud provider or the user creating the vm
+    def get_hostname(self, fqdn=False)
+
+    def get_package_mirror_info(self)
 
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst
index eeb197f..9d7e3de 100644
--- a/doc/rtd/topics/datasources/altcloud.rst
+++ b/doc/rtd/topics/datasources/altcloud.rst
@@ -3,24 +3,25 @@
 Alt Cloud
 =========
 
-The datasource altcloud will be used to pick up user data on `RHEVm`_ and `vSphere`_.
+The datasource altcloud will be used to pick up user data on `RHEVm`_ and
+`vSphere`_.
 
 RHEVm
 -----
 
 For `RHEVm`_ v3.0 the userdata is injected into the VM using floppy
-injection via the `RHEVm`_ dashboard "Custom Properties". 
+injection via the `RHEVm`_ dashboard "Custom Properties".
 
 The format of the Custom Properties entry must be:
 
 ::
-    
+
     floppyinject=user-data.txt:<base64 encoded data>
 
 For example to pass a simple bash script:
 
 .. sourcecode:: sh
-    
+
     % cat simple_script.bash
     #!/bin/bash
     echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt
@@ -38,7 +39,7 @@ set the "Custom Properties" when creating the RHEMv v3.0 VM to:
 **NOTE:** The prefix with file name must be: ``floppyinject=user-data.txt:``
 
 It is also possible to launch a `RHEVm`_ v3.0 VM and pass optional user
-data to it using the Delta Cloud. 
+data to it using the Delta Cloud.
 
 For more information on Delta Cloud see: http://deltacloud.apache.org
 
@@ -46,12 +47,12 @@ vSphere
 -------
 
 For VMWare's `vSphere`_ the userdata is injected into the VM as an ISO
-via the cdrom. This can be done using the `vSphere`_ dashboard 
+via the cdrom. This can be done using the `vSphere`_ dashboard
 by connecting an ISO image to the CD/DVD drive.
 
 To pass this example script to cloud-init running in a `vSphere`_ VM
 set the CD/DVD drive when creating the vSphere VM to point to an
-ISO on the data store. 
+ISO on the data store.
 
 **Note:** The ISO must contain the user data.
 
@@ -61,13 +62,13 @@ Create the ISO
 ^^^^^^^^^^^^^^
 
 .. sourcecode:: sh
-    
+
     % mkdir my-iso
 
 NOTE: The file name on the ISO must be: ``user-data.txt``
 
 .. sourcecode:: sh
-    
+
     % cp simple_script.bash my-iso/user-data.txt
     % genisoimage -o user-data.iso -r my-iso
 
@@ -75,7 +76,7 @@ Verify the ISO
 ^^^^^^^^^^^^^^
 
 .. sourcecode:: sh
-    
+
     % sudo mkdir /media/vsphere_iso
     % sudo mount -o loop user-data.iso /media/vsphere_iso
     % cat /media/vsphere_iso/user-data.txt
@@ -84,7 +85,7 @@ Verify the ISO
 Then, launch the `vSphere`_ VM the ISO user-data.iso attached as a CDROM.
 
 It is also possible to launch a `vSphere`_ VM and pass optional user
-data to it using the Delta Cloud. 
+data to it using the Delta Cloud.
 
 For more information on Delta Cloud see: http://deltacloud.apache.org
 
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index b41cddd..8328dfa 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -82,7 +82,8 @@ The settings that may be configured are:
    provided command to obtain metadata.
  * **apply_network_config**: Boolean set to True to use network configuration
    described by Azure's IMDS endpoint instead of fallback network config of
-   dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False.
+   dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
+   False.
  * **data_dir**: Path used to read metadata files and write crawled data.
  * **dhclient_lease_file**: The fallback lease file to source when looking for
    custom DHCP option 245 from Azure fabric.
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index a3101ed..95b9587 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -7,7 +7,7 @@ CloudStack
 sshkey thru the Virtual-Router. The datasource obtains the VR address via
 dhcp lease information given to the instance.
 For more details on meta-data and user-data,
-refer the `CloudStack Administrator Guide`_. 
+refer the `CloudStack Administrator Guide`_.
 
 URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1
 is the Virtual Router IP:
diff --git a/doc/rtd/topics/datasources/configdrive.rst b/doc/rtd/topics/datasources/configdrive.rst
index f1a488a..f4c5a34 100644
--- a/doc/rtd/topics/datasources/configdrive.rst
+++ b/doc/rtd/topics/datasources/configdrive.rst
@@ -64,7 +64,7 @@ The following criteria are required to as a config drive:
 ::
 
   openstack/
-    - 2012-08-10/ or latest/ 
+    - 2012-08-10/ or latest/
       - meta_data.json
       - user_data (not mandatory)
     - content/
@@ -83,7 +83,7 @@ only) file in the following ways.
 
 ::
 
-   dsmode:  
+   dsmode:
      values: local, net, pass
      default: pass
 
@@ -97,10 +97,10 @@ The difference between 'local' and 'net' is that local will not require
 networking to be up before user-data actions (or boothooks) are run.
 
 ::
-    
+
    instance-id:
      default: iid-dsconfigdrive
-     
+
 This is utilized as the metadata's instance-id.  It should generally
 be unique, as it is what is used to determine "is this a new instance".
 
@@ -108,18 +108,18 @@ be unique, as it is what is used to determine "is this a new instance".
 
    public-keys:
      default: None
-  
+
 If present, these keys will be used as the public keys for the
 instance.  This value overrides the content in authorized_keys.
 
 Note: it is likely preferable to provide keys via user-data
 
 ::
-    
+
    user-data:
      default: None
-     
-This provides cloud-init user-data. See :ref:`examples <yaml_examples>` for 
+
+This provides cloud-init user-data. See :ref:`examples <yaml_examples>` for
 what all can be present here.
 
 .. _OpenStack: http://www.openstack.org/
diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst
index 938ede8..88f1e5f 100644
--- a/doc/rtd/topics/datasources/digitalocean.rst
+++ b/doc/rtd/topics/datasources/digitalocean.rst
@@ -20,8 +20,10 @@ DigitalOcean's datasource can be configured as follows:
       retries: 3
       timeout: 2
 
-- *retries*: Determines the number of times to attempt to connect to the metadata service
-- *timeout*: Determines the timeout in seconds to wait for a response from the metadata service
+- *retries*: Determines the number of times to attempt to connect to the
+  metadata service
+- *timeout*: Determines the timeout in seconds to wait for a response from the
+  metadata service
 
 .. _DigitalOcean: http://digitalocean.com/
 .. _metadata service: https://developers.digitalocean.com/metadata/
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 76beca9..a90f377 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -13,7 +13,7 @@ instance metadata.
 Metadata is accessible via the following URL:
 
 ::
-    
+
     GET http://169.254.169.254/2009-04-04/meta-data/
     ami-id
     ami-launch-index
@@ -34,19 +34,20 @@ Metadata is accessible via the following URL:
 Userdata is accessible via the following URL:
 
 ::
-    
+
     GET http://169.254.169.254/2009-04-04/user-data
     1234,fred,reboot,true | 4512,jimbo, | 173,,,
 
 Note that there are multiple versions of this data provided, cloud-init
 by default uses **2009-04-04** but newer versions can be supported with
 relative ease (newer versions have more data exposed, while maintaining
-backward compatibility with the previous versions). 
+backward compatibility with the previous versions).
 
-To see which versions are supported from your cloud provider use the following URL:
+To see which versions are supported from your cloud provider use the following
+URL:
 
 ::
-    
+
     GET http://169.254.169.254/
     1.0
     2007-01-19
diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst
index 27aec9c..9074edc 100644
--- a/doc/rtd/topics/datasources/exoscale.rst
+++ b/doc/rtd/topics/datasources/exoscale.rst
@@ -26,8 +26,8 @@ In the password server case, the following rules apply in order to enable the
 "restore instance password" functionality:
 
  * If a password is returned by the password server, it is then marked "saved"
-   by the cloud-init datasource. Subsequent boots will skip setting the password
-   (the password server will return "saved_password").
+   by the cloud-init datasource. Subsequent boots will skip setting the
+   password (the password server will return "saved_password").
  * When the instance password is reset (via the Exoscale UI), the password
    server will return the non-empty password at next boot, therefore causing
    cloud-init to reset the instance's password.
@@ -38,15 +38,15 @@ Configuration
 Users of this datasource are discouraged from changing the default settings
 unless instructed to by Exoscale support.
 
-The following settings are available and can be set for the datasource in system
-configuration (in `/etc/cloud/cloud.cfg.d/`).
+The following settings are available and can be set for the datasource in
+system configuration (in `/etc/cloud/cloud.cfg.d/`).
 
 The settings available are:
 
  * **metadata_url**: The URL for the metadata service (defaults to
    ``http://169.254.169.254``)
- * **api_version**: The API version path on which to query the instance metadata
-   (defaults to ``1.0``)
+ * **api_version**: The API version path on which to query the instance
+   metadata (defaults to ``1.0``)
  * **password_server_port**: The port (on the metadata server) on which the
    password server listens (defaults to ``8080``).
  * **timeout**: the timeout value provided to urlopen for each individual http
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 1c5cf96..bc96f7f 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -57,24 +57,24 @@ Given a disk ubuntu 12.04 cloud image in 'disk.img', you can create a
 sufficient disk by following the example below.
 
 ::
-    
+
     ## create user-data and meta-data files that will be used
     ## to modify image on first boot
     $ { echo instance-id: iid-local01; echo local-hostname: cloudimg; } > meta-data
-    
+
     $ printf "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
-    
+
     ## create a disk to attach with some user-data and meta-data
     $ genisoimage  -output seed.iso -volid cidata -joliet -rock user-data meta-data
-    
+
     ## alternatively, create a vfat filesystem with same files
     ## $ truncate --size 2M seed.img
     ## $ mkfs.vfat -n cidata seed.img
     ## $ mcopy -oi seed.img user-data meta-data ::
-    
+
     ## create a new qcow image to boot, backed by your original image
     $ qemu-img create -f qcow2 -b disk.img boot-disk.img
-    
+
     ## boot the image and login as 'ubuntu' with password 'passw0rd'
     ## note, passw0rd was set as password through the user-data above,
     ## there is no password set on these images.
@@ -88,12 +88,12 @@ to determine if this is "first boot".  So if you are making updates to
 user-data you will also have to change that, or start the disk fresh.
 
 Also, you can inject an ``/etc/network/interfaces`` file by providing the
-content for that file in the ``network-interfaces`` field of metadata.  
+content for that file in the ``network-interfaces`` field of metadata.
 
 Example metadata:
 
 ::
-    
+
     instance-id: iid-abcdefg
     network-interfaces: |
       iface eth0 inet static
diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/topics/datasources/opennebula.rst
index 7c0367c..8e7c255 100644
--- a/doc/rtd/topics/datasources/opennebula.rst
+++ b/doc/rtd/topics/datasources/opennebula.rst
@@ -21,7 +21,7 @@ Datasource configuration
 Datasource accepts following configuration options.
 
 ::
-    
+
     dsmode:
       values: local, net, disabled
       default: net
@@ -30,7 +30,7 @@ Tells if this datasource will be processed in 'local' (pre-networking) or
 'net' (post-networking) stage or even completely 'disabled'.
 
 ::
-    
+
     parseuser:
       default: nobody
 
@@ -46,7 +46,7 @@ The following criteria are required:
    or have a *filesystem* label of **CONTEXT** or **CDROM**
 2. Must contain file *context.sh* with contextualization variables.
    File is generated by OpenNebula, it has a KEY='VALUE' format and
-   can be easily read by bash 
+   can be easily read by bash
 
 Contextualization variables
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -57,7 +57,7 @@ the OpenNebula documentation. Where multiple similar variables are
 specified, only first found is taken.
 
 ::
-    
+
     DSMODE
 
 Datasource mode configuration override. Values: local, net, disabled.
@@ -75,30 +75,30 @@ Datasource mode configuration override. Values: local, net, disabled.
 Static `network configuration`_.
 
 ::
-    
+
     HOSTNAME
 
 Instance hostname.
 
 ::
-    
+
     PUBLIC_IP
     IP_PUBLIC
     ETH0_IP
 
 If no hostname has been specified, cloud-init will try to create hostname
-from instance's IP address in 'local' dsmode. In 'net' dsmode, cloud-init 
+from instance's IP address in 'local' dsmode. In 'net' dsmode, cloud-init
 tries to resolve one of its IP addresses to get hostname.
 
 ::
-    
+
     SSH_KEY
     SSH_PUBLIC_KEY
 
 One or multiple SSH keys (separated by newlines) can be specified.
 
 ::
-    
+
     USER_DATA
     USERDATA
 
@@ -111,7 +111,7 @@ This example cloud-init configuration (*cloud.cfg*) enables
 OpenNebula datasource only in 'net' mode.
 
 ::
-    
+
     disable_ec2_metadata: True
     datasource_list: ['OpenNebula']
     datasource:
@@ -123,17 +123,17 @@ Example VM's context section
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 ::
-    
+
     CONTEXT=[
       PUBLIC_IP="$NIC[IP]",
-      SSH_KEY="$USER[SSH_KEY] 
-    $USER[SSH_KEY1] 
+      SSH_KEY="$USER[SSH_KEY]
+    $USER[SSH_KEY1]
     $USER[SSH_KEY2] ",
       USER_DATA="#cloud-config
     # see https://help.ubuntu.com/community/CloudInit
-    
+
     packages: []
-    
+
     mounts:
     - [vdc,none,swap,sw,0,0]
     runcmd:
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index 421da08..8ce2a53 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -78,6 +78,7 @@ upgrade packages and install ``htop`` on all instances:
   {"cloud-init": "#cloud-config\npackage_upgrade: True\npackages:\n - htop"}
 
 For more general information about how cloud-init handles vendor data,
-including how it can be disabled by users on instances, see :doc:`/topics/vendordata`.
+including how it can be disabled by users on instances, see
+:doc:`/topics/vendordata`.
 
 .. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/smartos.rst b/doc/rtd/topics/datasources/smartos.rst
index cb9a128..be11dfb 100644
--- a/doc/rtd/topics/datasources/smartos.rst
+++ b/doc/rtd/topics/datasources/smartos.rst
@@ -15,7 +15,8 @@ second serial console. On Linux, this is /dev/ttyS1. The data is a provided
 via a simple protocol: something queries for the data, the console
 responds with the status and if "SUCCESS" returns until a single ".\n".
 
-New versions of the SmartOS tooling will include support for base64 encoded data.
+New versions of the SmartOS tooling will include support for base64 encoded
+data.
 
 Meta-data channels
 ------------------
@@ -27,7 +28,7 @@ channels of SmartOS.
 
   - per the spec, user-data is for consumption by the end-user, not
     provisioning tools
-  - cloud-init entirely ignores this channel other than writting it to disk
+  - cloud-init entirely ignores this channel other than writing it to disk
   - removal of the meta-data key means that /var/db/user-data gets removed
   - a backup of previous meta-data is maintained as
     /var/db/user-data.<timestamp>. <timestamp> is the epoch time when
@@ -42,8 +43,9 @@ channels of SmartOS.
     - <timestamp> is the epoch time when cloud-init ran.
   - when the 'user-script' meta-data key goes missing, the user-script is
     removed from the file system, although a backup is maintained.
-  - if the script is not shebanged (i.e. starts with #!<executable>), then
-    or is not an executable, cloud-init will add a shebang of "#!/bin/bash"
+  - if the script does not start with a shebang (i.e. does not start with
+    #!<executable>) or is not executable, cloud-init will add a
+    shebang of "#!/bin/bash"
 
 * cloud-init:user-data is treated like on other Clouds.
 
@@ -133,7 +135,7 @@ or not to base64 decode something:
   * base64_all: Except for excluded keys, attempt to base64 decode
     the values. If the value fails to decode properly, it will be
     returned in its text
-  * base64_keys: A comma deliminated list of which keys are base64 encoded.
+  * base64_keys: A comma delimited list of which keys are base64 encoded.
   * b64-<key>:
     for any key, if there exists an entry in the metadata for 'b64-<key>'
     Then 'b64-<key>' is expected to be a plaintext boolean indicating whether
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index e13d915..afcf267 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -68,18 +68,18 @@ subcommands default to reading /var/log/cloud-init.log.
          00.00100s (modules-final/config-rightscale_userdata)
          ...
 
-* ``analyze boot`` Make subprocess calls to the kernel in order to get relevant 
+* ``analyze boot`` Make subprocess calls to the kernel in order to get relevant
   pre-cloud-init timestamps, such as the kernel start, kernel finish boot, and cloud-init start.
 
 .. code-block:: shell-session
 
-    $ cloud-init analyze boot 
+    $ cloud-init analyze boot
     -- Most Recent Boot Record --
-    	Kernel Started at: 2019-06-13 15:59:55.809385
-    	Kernel ended boot at: 2019-06-13 16:00:00.944740
-    	Kernel time to boot (seconds): 5.135355
-    	Cloud-init start: 2019-06-13 16:00:05.738396
-    	Time between Kernel boot and Cloud-init start (seconds): 4.793656
+        Kernel Started at: 2019-06-13 15:59:55.809385
+        Kernel ended boot at: 2019-06-13 16:00:00.944740
+        Kernel time to boot (seconds): 5.135355
+        Cloud-init start: 2019-06-13 16:00:05.738396
+        Time between Kernel boot and Cloud-init start (seconds): 4.793656
 
 
 Analyze quickstart - LXC
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst
index 7a6265e..ebd63ae 100644
--- a/doc/rtd/topics/dir_layout.rst
+++ b/doc/rtd/topics/dir_layout.rst
@@ -2,11 +2,12 @@
 Directory layout
 ****************
 
-Cloudinits's directory structure is somewhat different from a regular application::
+Cloud-init's directory structure is somewhat different from a regular
+application::
 
   /var/lib/cloud/
       - data/
-         - instance-id  
+         - instance-id
          - previous-instance-id
          - datasource
          - previous-datasource
@@ -35,38 +36,41 @@ Cloudinits's directory structure is somewhat different from a regular applicatio
 
   The main directory containing the cloud-init specific subdirectories.
   It is typically located at ``/var/lib`` but there are certain configuration
-  scenarios where this can be altered. 
+  scenarios where this can be altered.
 
   TBD, describe this overriding more.
 
 ``data/``
 
-  Contains information related to instance ids, datasources and hostnames of the previous
-  and current instance if they are different. These can be examined as needed to
-  determine any information related to a previous boot (if applicable).
+  Contains information related to instance ids, datasources and hostnames of
+  the previous and current instance if they are different. These can be
+  examined as needed to determine any information related to a previous boot
+  (if applicable).
 
 ``handlers/``
 
-  Custom ``part-handlers`` code is written out here. Files that end up here are written
-  out with in the scheme of ``part-handler-XYZ`` where ``XYZ`` is the handler number (the
-  first handler found starts at 0).
+  Custom ``part-handlers`` code is written out here. Files that end up here are
+  written out in the scheme of ``part-handler-XYZ`` where ``XYZ`` is the
+  handler number (the first handler found starts at 0).
 
 
 ``instance``
 
-  A symlink to the current ``instances/`` subdirectory that points to the currently
-  active instance (which is active is dependent on the datasource loaded).
+  A symlink to the current ``instances/`` subdirectory that points to the
+  currently active instance (which one is active depends on the datasource
+  loaded).
 
 ``instances/``
 
-  All instances that were created using this image end up with instance identifier
-  subdirectories (and corresponding data for each instance). The currently active
-  instance will be symlinked the ``instance`` symlink file defined previously.
+  All instances that were created using this image end up with instance
+  identifier subdirectories (and corresponding data for each instance). The
+  currently active instance will be symlinked via the ``instance`` symlink
+  file defined previously.
 
 ``scripts/``
 
-  Scripts that are downloaded/created by the corresponding ``part-handler`` will end up
-  in one of these subdirectories.
+  Scripts that are downloaded/created by the corresponding ``part-handler``
+  will end up in one of these subdirectories.
 
 ``seed/``
 
@@ -77,6 +81,7 @@ Cloudinits's directory structure is somewhat different from a regular applicatio
   Cloud-init has a concept of a module semaphore, which basically consists
   of the module name and its frequency. These files are used to ensure a module
   is only run `per-once`, `per-instance`, `per-always`. This folder contains
-  semaphore `files` which are only supposed to run `per-once` (not tied to the instance id).
+  semaphore `files` which are only supposed to run `per-once` (not tied to the
+  instance id).
 
 .. vi: textwidth=78
diff --git a/doc/rtd/topics/docs.rst b/doc/rtd/topics/docs.rst
new file mode 100644
index 0000000..1b15377
--- /dev/null
+++ b/doc/rtd/topics/docs.rst
@@ -0,0 +1,84 @@
+.. _docs:
+
+Docs
+****
+
+These docs are hosted on Read the Docs. The following will explain how to
+contribute to and build these docs locally.
+
+The documentation is primarily written in reStructuredText.
+
+
+Building
+========
+
+There is a makefile target to build the documentation for you:
+
+.. code-block:: shell-session
+
+    $ tox -e doc
+
+This will do two things:
+
+- Build the documentation using sphinx
+- Run doc8 against the documentation source code
+
+Once built, the HTML files will be viewable in ``doc/rtd_html``. Use your
+web browser to open ``index.html`` to view and navigate the site.
+
+Style Guide
+===========
+
+Headings
+--------
+The headings used across the documentation use the following hierarchy:
+
+- ``*****``: used once atop of a new page
+- ``=====``: each section on the page
+- ``-----``: subsections
+- ``^^^^^``: sub-subsections
+- ``"""""``: paragraphs
+
+The top level header ``######`` is reserved for the first page.
+
+If an underline and overline are both used, their lengths must be identical.
+The underline must be at least as long as the title itself.
+
+Line Length
+-----------
+Please keep the line lengths to a maximum of **79** characters. This ensures
+that the pages and tables do not get too wide that side scrolling is required.
+
+Header
+------
+Adding a link at the top of the page allows for the page to be referenced by
+other pages. For example for the FAQ page this would be:
+
+.. code-block:: rst
+
+    .. _faq:
+
+Footer
+------
+The footer should include the textwidth
+
+.. code-block:: rst
+
+    .. vi: textwidth=79
+
+Vertical Whitespace
+-------------------
+One newline between each section helps ensure readability of the documentation
+source code.
+
+Common Words
+------------
+There are some common words that should follow specific usage:
+
+- ``cloud-init``: always lower case with a hyphen, unless starting a sentence
+  in which case only the 'C' is capitalized (e.g. ``Cloud-init``).
+- ``metadata``: one word
+- ``user data``: two words, not to be combined
+- ``vendor data``: like user data, it is two words
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index c30d226..62b8ee4 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -134,7 +134,7 @@ Configure instances ssh-keys
 .. literalinclude:: ../../examples/cloud-config-ssh-keys.txt
    :language: yaml
    :linenos:
-   
+
 Additional apt configuration
 ============================
 
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
new file mode 100644
index 0000000..16e19c2
--- /dev/null
+++ b/doc/rtd/topics/faq.rst
@@ -0,0 +1,43 @@
+.. _faq:
+
+FAQ
+***
+
+Getting help
+============
+
+Having trouble? We would like to help!
+
+- Use the search bar at the upper left to search these docs
+- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
+- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
+
+
+Media
+=====
+
+Below are some videos, blog posts, and white papers about cloud-init from a
+variety of sources.
+
+- `Cloud Instance Initialization with cloud-init (Whitepaper)`_
+- `cloud-init Summit 2018`_
+- `cloud-init - The cross-cloud Magic Sauce (PDF)`_
+- `cloud-init Summit 2017`_
+- `cloud-init - Building clouds one Linux box at a time (Video)`_
+- `cloud-init - Building clouds one Linux box at a time (PDF)`_
+- `Metadata and cloud-init`_
+- `The beauty of cloud-init`_
+- `Introduction to cloud-init`_
+
+.. _Cloud Instance Initialization with cloud-init (Whitepaper): https://ubuntu.com/blog/cloud-instance-initialisation-with-cloud-init
+.. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/
+.. _cloud-init - The cross-cloud Magic Sauce (PDF): https://events.linuxfoundation.org/wp-content/uploads/2017/12/cloud-init-The-cross-cloud-Magic-Sauce-Scott-Moser-Chad-Smith-Canonical.pdf
+.. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/
+.. _cloud-init - Building clouds one Linux box at a time (Video): https://www.youtube.com/watch?v=1joQfUZQcPg
+.. _cloud-init - Building clouds one Linux box at a time (PDF): https://annex.debconf.org/debconf-share/debconf17/slides/164-cloud-init_Building_clouds_one_Linux_box_at_a_time.pdf
+.. _Metadata and cloud-init: https://www.youtube.com/watch?v=RHVhIWifVqU
+.. _The beauty of cloud-init: http://brandon.fuller.name/archives/2011/05/02/06.40.57/
+.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index 74d1fee..7605040 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -4,22 +4,24 @@
 User-Data Formats
 *****************
 
-User data that will be acted upon by cloud-init must be in one of the following types.
+User data that will be acted upon by cloud-init must be in one of the following
+types.
 
 Gzip Compressed Content
 =======================
 
 Content found to be gzip compressed will be uncompressed.
-The uncompressed data will then be used as if it were not compressed. 
+The uncompressed data will then be used as if it were not compressed.
 This is typically useful because user-data is limited to ~16384 [#]_ bytes.
 
 Mime Multi Part Archive
 =======================
 
-This list of rules is applied to each part of this multi-part file. 
+This list of rules is applied to each part of this multi-part file.
 Using a mime-multi part file, the user can specify more than one type of data.
 
-For example, both a user data script and a cloud-config type could be specified.
+For example, both a user data script and a cloud-config type could be
+specified.
 
 Supported content-types:
 
@@ -66,7 +68,8 @@ User-Data Script
 
 Typically used by those who just want to execute a shell script.
 
-Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME archive.
+Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME
+archive.
 
 .. note::
    New in cloud-init v. 18.4: User-data scripts can also render cloud instance
@@ -83,25 +86,27 @@ Example
   #!/bin/sh
   echo "Hello World.  The time is now $(date -R)!" | tee /root/output.txt
 
-  $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9 
+  $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9
 
 Include File
 ============
 
 This content is a ``include`` file.
 
-The file contains a list of urls, one per line.
-Each of the URLs will be read, and their content will be passed through this same set of rules.
-Ie, the content read from the URL can be gzipped, mime-multi-part, or plain text.
-If an error occurs reading a file the remaining files will not be read.
+The file contains a list of urls, one per line. Each of the URLs will be read,
+and their content will be passed through this same set of rules. Ie, the
+content read from the URL can be gzipped, mime-multi-part, or plain text. If
+an error occurs reading a file the remaining files will not be read.
 
-Begins with: ``#include`` or ``Content-Type: text/x-include-url``  when using a MIME archive.
+Begins with: ``#include`` or ``Content-Type: text/x-include-url``  when using
+a MIME archive.
 
 Cloud Config Data
 =================
 
-Cloud-config is the simplest way to accomplish some things
-via user-data. Using cloud-config syntax, the user can specify certain things in a human friendly format. 
+Cloud-config is the simplest way to accomplish some things via user-data. Using
+cloud-config syntax, the user can specify certain things in a human friendly
+format.
 
 These things include:
 
@@ -114,9 +119,11 @@ These things include:
 .. note::
    This file must be valid yaml syntax.
 
-See the :ref:`yaml_examples` section for a commented set of examples of supported cloud config formats.
+See the :ref:`yaml_examples` section for a commented set of examples of
+supported cloud config formats.
 
-Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using a MIME archive.
+Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when
+using a MIME archive.
 
 .. note::
    New in cloud-init v. 18.4: Cloud config data can also render cloud instance
@@ -126,25 +133,41 @@ Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using
 Upstart Job
 ===========
 
-Content is placed into a file in ``/etc/init``, and will be consumed by upstart as any other upstart job.
+Content is placed into a file in ``/etc/init``, and will be consumed by upstart
+as any other upstart job.
 
-Begins with: ``#upstart-job`` or ``Content-Type: text/upstart-job`` when using a MIME archive.
+Begins with: ``#upstart-job`` or ``Content-Type: text/upstart-job`` when using
+a MIME archive.
 
 Cloud Boothook
 ==============
 
-This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately.
-This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself.
-It is provided with the instance id in the environment variable ``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance' type of functionality.
+This content is ``boothook`` data. It is stored in a file under
+``/var/lib/cloud`` and then executed immediately. This is the earliest ``hook``
+available. Note, that there is no mechanism provided for running only once. The
+boothook must take care of this itself.
 
-Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive.
+It is provided with the instance id in the environment variable
+``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance'
+type of functionality.
+
+Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when
+using a MIME archive.
 
 Part Handler
 ============
 
-This is a ``part-handler``: It contains custom code for either supporting new mime-types in multi-part user data, or overriding the existing handlers for supported mime-types.  It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated).
-This must be python code that contains a ``list_types`` function and a ``handle_part`` function. 
-Once the section is read the ``list_types`` method will be called. It must return a list of mime-types that this part-handler handles.  Because mime parts are processed in order, a ``part-handler`` part must precede any parts with mime-types it is expected to handle in the same user data.
+This is a ``part-handler``: It contains custom code for either supporting new
+mime-types in multi-part user data, or overriding the existing handlers for
+supported mime-types.  It will be written to a file in ``/var/lib/cloud/data``
+based on its filename (which is generated).
+
+This must be python code that contains a ``list_types`` function and a
+``handle_part`` function. Once the section is read the ``list_types`` method
+will be called. It must return a list of mime-types that this part-handler
+handles.  Because mime parts are processed in order, a ``part-handler`` part
+must precede any parts with mime-types it is expected to handle in the same
+user data.
 
 The ``handle_part`` function must be defined like:
 
@@ -156,11 +179,13 @@ The ``handle_part`` function must be defined like:
       # filename = the filename of the part (or a generated filename if none is present in mime data)
       # payload = the parts' content
 
-Cloud-init will then call the ``handle_part`` function once before it handles any parts, once per part received, and once after all parts have been handled.
-The ``'__begin__'`` and ``'__end__'`` sentinels allow the part handler to do initialization or teardown before or after
-receiving any parts.
+Cloud-init will then call the ``handle_part`` function once before it handles
+any parts, once per part received, and once after all parts have been handled.
+The ``'__begin__'`` and ``'__end__'`` sentinels allow the part handler to do
+initialization or teardown before or after receiving any parts.
 
-Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when using a MIME archive.
+Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when
+using a MIME archive.
 
 Example
 -------
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
index 5f7ca18..2b5e5da 100644
--- a/doc/rtd/topics/merging.rst
+++ b/doc/rtd/topics/merging.rst
@@ -68,8 +68,10 @@ Cloud-init provides merging for the following built-in types:
 The ``Dict`` merger has the following options which control what is done with
 values contained within the config.
 
-- ``allow_delete``: Existing values not present in the new value can be deleted, defaults to False
-- ``no_replace``: Do not replace an existing value if one is already present, enabled by default.
+- ``allow_delete``: Existing values not present in the new value can be
+  deleted, defaults to False
+- ``no_replace``: Do not replace an existing value if one is already present,
+  enabled by default.
 - ``replace``: Overwrite existing values with new ones.
 
 The ``List`` merger has the following options which control what is done with
@@ -77,7 +79,8 @@ the values contained within the config.
 
 - ``append``:  Add new value to the end of the list, defaults to False.
 - ``prepend``:  Add new values to the start of the list, defaults to False.
-- ``no_replace``: Do not replace an existing value if one is already present, enabled by default.
+- ``no_replace``: Do not replace an existing value if one is already present,
+  enabled by default.
 - ``replace``: Overwrite existing values with new ones.
 
 The ``Str`` merger has the following options which control what is done with
@@ -88,10 +91,13 @@ the values contained within the config.
 Common options for all merge types which control how recursive merging is
 done on other types.
 
-- ``recurse_dict``: If True merge the new values of the dictionary, defaults to True.
-- ``recurse_list``: If True merge the new values of the list, defaults to False.
+- ``recurse_dict``: If True merge the new values of the dictionary, defaults to
+  True.
+- ``recurse_list``: If True merge the new values of the list, defaults to
+  False.
 - ``recurse_array``: Alias for ``recurse_list``.
-- ``recurse_str``: If True merge the new values of the string, defaults to False.
+- ``recurse_str``: If True merge the new values of the string, defaults to
+  False.
 
 
 Customizability
diff --git a/doc/rtd/topics/moreinfo.rst b/doc/rtd/topics/moreinfo.rst
deleted file mode 100644
index 9c3b7fb..0000000
--- a/doc/rtd/topics/moreinfo.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-****************
-More information
-****************
-
-Useful external references
-==========================
-
-- `The beauty of cloudinit`_
-- `Introduction to cloud-init`_ (video)
-
-.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY
-.. _The beauty of cloudinit: http://brandon.fuller.name/archives/2011/05/02/06.40.57/
-.. vi: textwidth=78
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index 50f5fa6..7f85755 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -54,11 +54,11 @@ Physical devices
 
 :   (Examples: ethernet, wifi) These can dynamically come and go between
     reboots and even during runtime (hotplugging). In the generic case, they
-    can be selected by ``match:`` rules on desired properties, such as name/name
-    pattern, MAC address, driver, or device paths. In general these will match
-    any number of devices (unless they refer to properties which are unique
-    such as the full path or MAC address), so without further knowledge about
-    the hardware these will always be considered as a group.
+    can be selected by ``match:`` rules on desired properties, such as
+    name/name pattern, MAC address, driver, or device paths. In general these
+    will match any number of devices (unless they refer to properties which are
+    unique such as the full path or MAC address), so without further knowledge
+    about the hardware these will always be considered as a group.
 
     It is valid to specify no match rules at all, in which case the ID field is
     simply the interface name to be matched. This is mostly useful if you want
@@ -228,8 +228,8 @@ Example: ::
 
 **parameters**: *<(mapping)>*
 
-Customization parameters for special bonding options.  Time values are specified
-in seconds unless otherwise specified.
+Customization parameters for special bonding options.  Time values are
+specified in seconds unless otherwise specified.
 
 **mode**: *<(scalar)>*
 
@@ -367,8 +367,8 @@ Example: ::
 
 **parameters**: <*(mapping)>*
 
-Customization parameters for special bridging options.  Time values are specified
-in seconds unless otherwise specified.
+Customization parameters for special bridging options.  Time values are
+specified in seconds unless otherwise specified.
 
 **ageing-time**: <*(scalar)>*
 
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index 349d54c..a615470 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -169,19 +169,56 @@ class TestDatasourceOVF(CiTestCase):
             MARKER-ID = 12345345
             """)
         util.write_file(conf_file, conf_content)
-        with self.assertRaises(CustomScriptNotFound) as context:
-            wrap_and_call(
-                'cloudinit.sources.DataSourceOVF',
-                {'util.read_dmi_data': 'vmware',
-                 'util.del_dir': True,
-                 'search_file': self.tdir,
-                 'wait_for_imc_cfg_file': conf_file,
-                 'get_nics_to_enable': ''},
-                ds.get_data)
+        with mock.patch(MPATH + 'get_tools_config', return_value='true'):
+            with self.assertRaises(CustomScriptNotFound) as context:
+                wrap_and_call(
+                    'cloudinit.sources.DataSourceOVF',
+                    {'util.read_dmi_data': 'vmware',
+                     'util.del_dir': True,
+                     'search_file': self.tdir,
+                     'wait_for_imc_cfg_file': conf_file,
+                     'get_nics_to_enable': ''},
+                    ds.get_data)
         customscript = self.tmp_path('test-script', self.tdir)
         self.assertIn('Script %s not found!!' % customscript,
                       str(context.exception))
 
+    def test_get_data_cust_script_disabled(self):
+        """If custom script is disabled by VMware tools configuration,
+        raise a RuntimeError.
+        """
+        paths = Paths({'cloud_dir': self.tdir})
+        ds = self.datasource(
+            sys_cfg={'disable_vmware_customization': False}, distro={},
+            paths=paths)
+        # Prepare the conf file
+        conf_file = self.tmp_path('test-cust', self.tdir)
+        conf_content = dedent("""\
+            [CUSTOM-SCRIPT]
+            SCRIPT-NAME = test-script
+            [MISC]
+            MARKER-ID = 12345346
+            """)
+        util.write_file(conf_file, conf_content)
+        # Prepare the custom script
+        customscript = self.tmp_path('test-script', self.tdir)
+        util.write_file(customscript, "This is the post cust script")
+
+        with mock.patch(MPATH + 'get_tools_config', return_value='false'):
+            with mock.patch(MPATH + 'set_customization_status',
+                            return_value=('msg', b'')):
+                with self.assertRaises(RuntimeError) as context:
+                    wrap_and_call(
+                        'cloudinit.sources.DataSourceOVF',
+                        {'util.read_dmi_data': 'vmware',
+                         'util.del_dir': True,
+                         'search_file': self.tdir,
+                         'wait_for_imc_cfg_file': conf_file,
+                         'get_nics_to_enable': ''},
+                        ds.get_data)
+        self.assertIn('Custom script is disabled by VM Administrator',
+                      str(context.exception))
+
     def test_get_data_non_vmware_seed_platform_info(self):
         """Platform info properly reports when on non-vmware platforms."""
         paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 587e699..de87be2 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -195,6 +195,10 @@ class DsIdentifyBase(CiTestCase):
         return self._check_via_dict(
             data, RC_FOUND, dslist=[data.get('ds'), DS_NONE])
 
+    def _test_ds_not_found(self, name):
+        data = copy.deepcopy(VALID_CFG[name])
+        return self._check_via_dict(data, RC_NOT_FOUND)
+
     def _check_via_dict(self, data, rc, dslist=None, **kwargs):
         ret = self._call_via_dict(data, **kwargs)
         good = False
@@ -244,9 +248,13 @@ class TestDsIdentify(DsIdentifyBase):
         self._test_ds_found('Ec2-xen')
 
     def test_brightbox_is_ec2(self):
-        """EC2: product_serial ends with 'brightbox.com'"""
+        """EC2: product_serial ends with '.brightbox.com'"""
         self._test_ds_found('Ec2-brightbox')
 
+    def test_bobrightbox_is_not_brightbox(self):
+        """EC2: bobrightbox.com in product_serial is not brightbox'"""
+        self._test_ds_not_found('Ec2-brightbox-negative')
+
     def test_gce_by_product_name(self):
         """GCE identifies itself with product_name."""
         self._test_ds_found('GCE')
@@ -724,7 +732,11 @@ VALID_CFG = {
     },
     'Ec2-brightbox': {
         'ds': 'Ec2',
-        'files': {P_PRODUCT_SERIAL: 'facc6e2f.brightbox.com\n'},
+        'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'},
+    },
+    'Ec2-brightbox-negative': {
+        'ds': 'Ec2',
+        'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'},
     },
     'GCE': {
         'ds': 'GCE',
diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py
new file mode 100644
index 0000000..b8fa994
--- /dev/null
+++ b/tests/unittests/test_vmware/test_guestcust_util.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2019 Canonical Ltd.
+# Copyright (C) 2019 VMware INC.
+#
+# Author: Xiaofeng Wang <xiaofengw@xxxxxxxxxx>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import util
+from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
+    get_tools_config,
+)
+from cloudinit.tests.helpers import CiTestCase, mock
+
+
+class TestGuestCustUtil(CiTestCase):
+    def test_get_tools_config_not_installed(self):
+        """
+        This test is designed to verify the behavior if vmware-toolbox-cmd
+        is not installed.
+        """
+        with mock.patch.object(util, 'which', return_value=None):
+            self.assertEqual(
+                get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
+
+    def test_get_tools_config_internal_exception(self):
+        """
+        This test is designed to verify the behavior if an internal exception
+        is raised.
+        """
+        with mock.patch.object(util, 'which', return_value='/dummy/path'):
+            with mock.patch.object(util, 'subp',
+                                   return_value=('key=value', b''),
+                                   side_effect=util.ProcessExecutionError(
+                                       "subp failed", exit_code=99)):
+                # verify return value is 'defaultVal', not 'value'.
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'defaultVal')
+
+    def test_get_tools_config_normal(self):
+        """
+        This test is designed to verify that the value can be parsed from
+        key = value of the given [section]
+        """
+        with mock.patch.object(util, 'which', return_value='/dummy/path'):
+            # value is not blank
+            with mock.patch.object(util, 'subp',
+                                   return_value=('key =   value  ', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'value')
+            # value is blank
+            with mock.patch.object(util, 'subp',
+                                   return_value=('key = ', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    '')
+            # value contains =
+            with mock.patch.object(util, 'subp',
+                                   return_value=('key=Bar=Wark', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'Bar=Wark')
+
+# vi: ts=4 expandtab
diff --git a/tools/ds-identify b/tools/ds-identify
index e0d4865..2447d14 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -891,9 +891,8 @@ ec2_identify_platform() {
     local default="$1"
     local serial="${DI_DMI_PRODUCT_SERIAL}"
 
-    # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693
     case "$serial" in
-        *brightbox.com) _RET="Brightbox"; return 0;;
+        *.brightbox.com) _RET="Brightbox"; return 0;;
     esac
 
     # AWS http://docs.aws.amazon.com/AWSEC2/
diff --git a/tox.ini b/tox.ini
index 1f01eb7..f5baf32 100644
--- a/tox.ini
+++ b/tox.ini
@@ -53,8 +53,13 @@ exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
 
 [testenv:doc]
 basepython = python3
-deps = sphinx
-commands = {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
+deps =
+    doc8
+    sphinx
+    sphinx_rtd_theme
+commands =
+    {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
+    doc8 doc/rtd
 
 [testenv:xenial]
 commands =