cloud-init-dev team mailing list archive

[Merge] ~chad.smith/cloud-init:ubuntu/zesty into cloud-init:ubuntu/zesty

 

Chad Smith has proposed merging ~chad.smith/cloud-init:ubuntu/zesty into cloud-init:ubuntu/zesty.

Requested reviews:
  cloud-init commiters (cloud-init-dev)
Related bugs:
  Bug #1726572 in cloud-init: "ntp config handling inconsistent for SLES and openSUSE"
  https://bugs.launchpad.net/cloud-init/+bug/1726572
  Bug #1731022 in cloud-init: "host template expansion does not work on SUSE distros"
  https://bugs.launchpad.net/cloud-init/+bug/1731022
  Bug #1732917 in cloud-init: "17.1 update breaks EC2 nodes"
  https://bugs.launchpad.net/cloud-init/+bug/1732917
  Bug #1732964 in cloud-init: "dhclient process started by cloud-init-local.service still running afterwards"
  https://bugs.launchpad.net/cloud-init/+bug/1732964

For more details, see:
https://code.launchpad.net/~chad.smith/cloud-init/+git/cloud-init/+merge/334071

Upstream master snapshot for SRU into zesty.
-- 
Your team cloud-init commiters is requested to review the proposed merge of ~chad.smith/cloud-init:ubuntu/zesty into cloud-init:ubuntu/zesty.
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index d8a9fc8..ba61678 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -56,8 +56,8 @@ class Cloud(object):
     def get_template_filename(self, name):
         fn = self.paths.template_tpl % (name)
         if not os.path.isfile(fn):
-            LOG.warning("No template found at %s for template named %s",
-                        fn, name)
+            LOG.warning("No template found in %s for template named %s",
+                        os.path.dirname(fn), name)
             return None
         return fn
 
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index d43d060..f50bcb3 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -23,7 +23,7 @@ frequency = PER_INSTANCE
 NTP_CONF = '/etc/ntp.conf'
 TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
 NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu']
 
 
 # The schema definition for each cloud-config module is a strict contract for
@@ -174,8 +174,13 @@ def rename_ntp_conf(config=None):
 
 def generate_server_names(distro):
     names = []
+    pool_distro = distro
+    # For legal reasons x.pool.sles.ntp.org does not exist,
+    # use the opensuse pool
+    if distro == 'sles':
+        pool_distro = 'opensuse'
     for x in range(0, NR_POOL_SERVERS):
-        name = "%d.%s.pool.ntp.org" % (x, distro)
+        name = "%d.%s.pool.ntp.org" % (x, pool_distro)
         names.append(name)
     return names
 
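For illustration, the pool-name mapping introduced above behaves as in this minimal standalone sketch (the constant and the sles-to-opensuse substitution are taken from the hunk; the snippet is not the module itself):

    NR_POOL_SERVERS = 4

    def generate_server_names(distro):
        """Return pool.ntp.org names, reusing the opensuse pool for sles."""
        # x.pool.sles.ntp.org is not registered, so SLES points at opensuse.
        pool_distro = 'opensuse' if distro == 'sles' else distro
        return ["%d.%s.pool.ntp.org" % (x, pool_distro)
                for x in range(0, NR_POOL_SERVERS)]

    # generate_server_names('sles') ->
    # ['0.opensuse.pool.ntp.org', '1.opensuse.pool.ntp.org',
    #  '2.opensuse.pool.ntp.org', '3.opensuse.pool.ntp.org']
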
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 7f36cf8..a9d21e7 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -38,14 +38,16 @@ Subscription`` example config.
         server-hostname: <hostname>
 """
 
+from cloudinit import log as logging
 from cloudinit import util
 
+LOG = logging.getLogger(__name__)
+
 distros = ['fedora', 'rhel']
 
 
 def handle(name, cfg, _cloud, log, _args):
-    sm = SubscriptionManager(cfg)
-    sm.log = log
+    sm = SubscriptionManager(cfg, log=log)
     if not sm.is_configured():
         log.debug("%s: module not configured.", name)
         return None
@@ -86,10 +88,9 @@ def handle(name, cfg, _cloud, log, _args):
                 if not return_stat:
                     raise SubscriptionError("Unable to attach pools {0}"
                                             .format(sm.pools))
-            if (sm.enable_repo is not None) or (sm.disable_repo is not None):
-                return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
-                if not return_stat:
-                    raise SubscriptionError("Unable to add or remove repos")
+            return_stat = sm.update_repos()
+            if not return_stat:
+                raise SubscriptionError("Unable to add or remove repos")
             sm.log_success("rh_subscription plugin completed successfully")
         except SubscriptionError as e:
             sm.log_warn(str(e))
@@ -108,7 +109,10 @@ class SubscriptionManager(object):
                      'rhsm-baseurl', 'server-hostname',
                      'auto-attach', 'service-level']
 
-    def __init__(self, cfg):
+    def __init__(self, cfg, log=None):
+        if log is None:
+            log = LOG
+        self.log = log
         self.cfg = cfg
         self.rhel_cfg = self.cfg.get('rh_subscription', {})
         self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
@@ -130,7 +134,7 @@ class SubscriptionManager(object):
 
     def log_warn(self, msg):
         '''Simple wrapper for logging warning messages. Useful for unittests'''
-        self.log.warn(msg)
+        self.log.warning(msg)
 
     def _verify_keys(self):
         '''
@@ -245,7 +249,7 @@ class SubscriptionManager(object):
             return False
 
         reg_id = return_out.split("ID: ")[1].rstrip()
-        self.log.debug("Registered successfully with ID {0}".format(reg_id))
+        self.log.debug("Registered successfully with ID %s", reg_id)
         return True
 
     def _set_service_level(self):
@@ -347,7 +351,7 @@ class SubscriptionManager(object):
             try:
                 self._sub_man_cli(cmd)
                 self.log.debug("Attached the following pools to your "
-                               "system: %s" % (", ".join(pool_list))
+                               "system: %s", (", ".join(pool_list))
                                .replace('--pool=', ''))
                 return True
             except util.ProcessExecutionError as e:
@@ -355,18 +359,24 @@ class SubscriptionManager(object):
                               "due to {1}".format(pool, e))
                 return False
 
-    def update_repos(self, erepos, drepos):
+    def update_repos(self):
         '''
         Takes a list of yum repo ids that need to be disabled or enabled; then
         it verifies if they are already enabled or disabled and finally
         executes the action to disable or enable
         '''
 
-        if (erepos is not None) and (not isinstance(erepos, list)):
+        erepos = self.enable_repo
+        drepos = self.disable_repo
+        if erepos is None:
+            erepos = []
+        if drepos is None:
+            drepos = []
+        if not isinstance(erepos, list):
             self.log_warn("Repo IDs must in the format of a list.")
             return False
 
-        if (drepos is not None) and (not isinstance(drepos, list)):
+        if not isinstance(drepos, list):
             self.log_warn("Repo IDs must in the format of a list.")
             return False
 
@@ -399,14 +409,14 @@ class SubscriptionManager(object):
             for fail in enable_list_fail:
                 # Check if the repo exists or not
                 if fail in active_repos:
-                    self.log.debug("Repo {0} is already enabled".format(fail))
+                    self.log.debug("Repo %s is already enabled", fail)
                 else:
                     self.log_warn("Repo {0} does not appear to "
                                   "exist".format(fail))
         if len(disable_list_fail) > 0:
             for fail in disable_list_fail:
-                self.log.debug("Repo {0} not disabled "
-                               "because it is not enabled".format(fail))
+                self.log.debug("Repo %s not disabled "
+                               "because it is not enabled", fail)
 
         cmd = ['repos']
         if len(disable_list) > 0:
@@ -422,10 +432,10 @@ class SubscriptionManager(object):
             return False
 
         if len(enable_list) > 0:
-            self.log.debug("Enabled the following repos: %s" %
+            self.log.debug("Enabled the following repos: %s",
                            (", ".join(enable_list)).replace('--enable=', ''))
         if len(disable_list) > 0:
-            self.log.debug("Disabled the following repos: %s" %
+            self.log.debug("Disabled the following repos: %s",
                            (", ".join(disable_list)).replace('--disable=', ''))
         return True
 
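Most of the logging changes in this file replace eager str.format() calls with lazy '%s' arguments and the deprecated log.warn alias with log.warning. A short sketch of why the lazy form is preferred, using only the standard logging module:

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)
    repo = 'rhel-7-server-rpms'

    # Eager: the message string is built even though DEBUG is filtered out.
    LOG.debug("Repo {0} is already enabled".format(repo))

    # Lazy: logging interpolates the arguments only if the record is emitted,
    # which is the form the hunks above switch to.
    LOG.debug("Repo %s is already enabled", repo)
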
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index b394784..c96eede 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -23,8 +23,8 @@ using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the
 
 If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not
 rewrite ``/etc/hosts`` entirely, but rather will ensure that a entry for the
-fqdn with ip ``127.0.1.1`` is present in ``/etc/hosts`` (i.e.
-``ping <hostname>`` will ping ``127.0.1.1``).
+fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
+``ping <hostname>`` will ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
 
 .. note::
     if ``manage_etc_hosts`` is set ``true`` or ``template``, the contents
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 0cba703..d8624d8 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -8,6 +8,7 @@ import configobj
 import logging
 import os
 import re
+import signal
 
 from cloudinit.net import find_fallback_nic, get_devicelist
 from cloudinit import temp_utils
@@ -41,8 +42,7 @@ def maybe_perform_dhcp_discovery(nic=None):
     if nic is None:
         nic = find_fallback_nic()
         if nic is None:
-            LOG.debug(
-                'Skip dhcp_discovery: Unable to find fallback nic.')
+            LOG.debug('Skip dhcp_discovery: Unable to find fallback nic.')
             return {}
     elif nic not in get_devicelist():
         LOG.debug(
@@ -119,7 +119,13 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
     cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
            '-pf', pid_file, interface, '-sf', '/bin/true']
     util.subp(cmd, capture=True)
-    return parse_dhcp_lease_file(lease_file)
+    pid = None
+    try:
+        pid = int(util.load_file(pid_file).strip())
+        return parse_dhcp_lease_file(lease_file)
+    finally:
+        if pid:
+            os.kill(pid, signal.SIGKILL)
 
 
 def networkd_parse_lease(content):
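
The dhcp_discovery() change above reads dhclient's pid file and kills the daemon once the leases are parsed, so the sandboxed dhclient no longer outlives cloud-init-local (LP: #1732964). The cleanup pattern in isolation, as a sketch using only the standard library (the helper name and paths are illustrative):

    import os
    import signal

    def parse_then_reap(pid_file, parse_leases):
        """Run parse_leases(), then SIGKILL the daemon named in pid_file."""
        pid = None
        try:
            with open(pid_file) as fp:
                pid = int(fp.read().strip())
            return parse_leases()
        finally:
            # Runs whether or not parsing raised; skipped if the pid file
            # could not be read (pid stays None), matching the hunk above.
            if pid:
                os.kill(pid, signal.SIGKILL)
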
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 1c1f504..3d8e15c 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -2,6 +2,7 @@
 
 import mock
 import os
+import signal
 from textwrap import dedent
 
 from cloudinit.net.dhcp import (
@@ -114,8 +115,9 @@ class TestDHCPDiscoveryClean(CiTestCase):
         self.assertEqual('eth9', call[0][1])
         self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2])
 
+    @mock.patch('cloudinit.net.dhcp.os.kill')
     @mock.patch('cloudinit.net.dhcp.util.subp')
-    def test_dhcp_discovery_run_in_sandbox(self, m_subp):
+    def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill):
         """dhcp_discovery brings up the interface and runs dhclient.
 
         It also returns the parsed dhcp.leases file generated in the sandbox.
@@ -134,6 +136,10 @@ class TestDHCPDiscoveryClean(CiTestCase):
         """)
         lease_file = os.path.join(tmpdir, 'dhcp.leases')
         write_file(lease_file, lease_content)
+        pid_file = os.path.join(tmpdir, 'dhclient.pid')
+        my_pid = 1
+        write_file(pid_file, "%d\n" % my_pid)
+
         self.assertItemsEqual(
             [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
               'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
@@ -149,6 +155,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
                 [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf',
                  lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
                  'eth9', '-sf', '/bin/true'], capture=True)])
+        m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
 
 
 class TestSystemdParseLeases(CiTestCase):
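
One detail worth noting in the test above: mock.patch decorators apply bottom-up, so the decorator nearest the function (util.subp) supplies the first mock argument and os.kill supplies the last, hence the (self, m_subp, m_kill) signature. A tiny self-contained illustration of that ordering (the patched targets here are arbitrary stand-ins):

    import os
    from unittest import mock

    @mock.patch('os.kill')     # outermost decorator -> last mock argument
    @mock.patch('os.getcwd')   # innermost decorator -> first mock argument
    def show_order(m_getcwd, m_kill):
        os.getcwd()
        assert m_getcwd.called and not m_kill.called

    show_order()
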
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 80c2bd1..8c3492d 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -465,10 +465,8 @@ class DataSourceAzure(sources.DataSource):
 
            1. Probe the drivers of the net-devices present and inject them in
               the network configuration under params: driver: <driver> value
-           2. If the driver value is 'mlx4_core', the control mode should be
-              set to manual.  The device will be later used to build a bond,
-              for now we want to ensure the device gets named but does not
-              break any network configuration
+           2. Generate a fallback network config that does not include any of
+              the blacklisted devices.
         """
         blacklist = ['mlx4_core']
         if not self._network_config:
@@ -477,25 +475,6 @@ class DataSourceAzure(sources.DataSource):
             netconfig = net.generate_fallback_config(
                 blacklist_drivers=blacklist, config_driver=True)
 
-            # if we have any blacklisted devices, update the network_config to
-            # include the device, mac, and driver values, but with no ip
-            # config; this ensures udev rules are generated but won't affect
-            # ip configuration
-            bl_found = 0
-            for bl_dev in [dev for dev in net.get_devicelist()
-                           if net.device_driver(dev) in blacklist]:
-                bl_found += 1
-                cfg = {
-                    'type': 'physical',
-                    'name': 'vf%d' % bl_found,
-                    'mac_address': net.get_interface_mac(bl_dev),
-                    'params': {
-                        'driver': net.device_driver(bl_dev),
-                        'device_id': net.device_devid(bl_dev),
-                    },
-                }
-                netconfig['config'].append(cfg)
-
             self._network_config = netconfig
 
         return self._network_config
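
The docstring above now reflects what actually happens: generate_fallback_config() is asked to leave out any device whose driver is blacklisted (mlx4_core, the SR-IOV VF driver on Azure), rather than appending manually crafted entries afterwards. Conceptually, the blacklist acts like this hypothetical filter (the function name, device names and driver map below are made up for illustration):

    def filter_blacklisted(dev_drivers, blacklist_drivers):
        """Drop devices whose kernel driver is on the blacklist."""
        return {name: drv for name, drv in dev_drivers.items()
                if drv not in blacklist_drivers}

    # Only the synthetic NIC survives; the mlx4_core VF is omitted entirely.
    devs = {'eth0': 'hv_netvsc', 'eth1': 'mlx4_core'}
    print(filter_blacklisted(devs, ['mlx4_core']))   # {'eth0': 'hv_netvsc'}
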
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 0ef2217..7bbbfb6 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -65,7 +65,7 @@ class DataSourceEc2(sources.DataSource):
     get_network_metadata = False
 
     # Track the discovered fallback nic for use in configuration generation.
-    fallback_nic = None
+    _fallback_interface = None
 
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -92,18 +92,17 @@ class DataSourceEc2(sources.DataSource):
         elif self.cloud_platform == Platforms.NO_EC2_METADATA:
             return False
 
-        self.fallback_nic = net.find_fallback_nic()
         if self.get_network_metadata:  # Setup networking in init-local stage.
             if util.is_FreeBSD():
                 LOG.debug("FreeBSD doesn't support running dhclient with -sf")
                 return False
-            dhcp_leases = dhcp.maybe_perform_dhcp_discovery(self.fallback_nic)
+            dhcp_leases = dhcp.maybe_perform_dhcp_discovery(
+                self.fallback_interface)
             if not dhcp_leases:
                 # DataSourceEc2Local failed in init-local stage. DataSourceEc2
                 # will still run in init-network stage.
                 return False
             dhcp_opts = dhcp_leases[-1]
-            self.fallback_nic = dhcp_opts.get('interface')
             net_params = {'interface': dhcp_opts.get('interface'),
                           'ip': dhcp_opts.get('fixed-address'),
                           'prefix_or_mask': dhcp_opts.get('subnet-mask'),
@@ -301,21 +300,44 @@ class DataSourceEc2(sources.DataSource):
             return None
 
         result = None
-        net_md = self.metadata.get('network')
+        no_network_metadata_on_aws = bool(
+            'network' not in self.metadata and
+            self.cloud_platform == Platforms.AWS)
+        if no_network_metadata_on_aws:
+            LOG.debug("Metadata 'network' not present:"
+                      " Refreshing stale metadata from prior to upgrade.")
+            util.log_time(
+                logfunc=LOG.debug, msg='Re-crawl of metadata service',
+                func=self._crawl_metadata)
+
         # Limit network configuration to only the primary/fallback nic
-        macs_to_nics = {
-            net.get_interface_mac(self.fallback_nic): self.fallback_nic}
+        iface = self.fallback_interface
+        macs_to_nics = {net.get_interface_mac(iface): iface}
+        net_md = self.metadata.get('network')
         if isinstance(net_md, dict):
             result = convert_ec2_metadata_network_config(
-                net_md, macs_to_nics=macs_to_nics,
-                fallback_nic=self.fallback_nic)
+                net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
         else:
-            LOG.warning("unexpected metadata 'network' key not valid: %s",
-                        net_md)
+            LOG.warning("Metadata 'network' key not valid: %s.", net_md)
         self._network_config = result
 
         return self._network_config
 
+    @property
+    def fallback_interface(self):
+        if self._fallback_interface is None:
+            # fallback_nic was used at one point, so restored objects may
+            # have an attribute there. respect that if found.
+            _legacy_fbnic = getattr(self, 'fallback_nic', None)
+            if _legacy_fbnic:
+                self._fallback_interface = _legacy_fbnic
+                self.fallback_nic = None
+            else:
+                self._fallback_interface = net.find_fallback_nic()
+                if self._fallback_interface is None:
+                    LOG.warning("Did not find a fallback interface on EC2.")
+        return self._fallback_interface
+
     def _crawl_metadata(self):
         """Crawl metadata service when available.
 
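The new fallback_interface property lazily resolves the NIC once and caches it, and it honors a fallback_nic value left behind by a datasource object cached by 17.1, which is part of what bug #1732917 ("17.1 update breaks EC2 nodes") addresses. The caching-plus-migration pattern on its own, with the actual lookup stubbed out (class and stub names are illustrative):

    class DataSourceSketch(object):
        # Class-level default, so restored (pickled) objects that never had
        # the attribute still resolve it on first access.
        _fallback_interface = None

        def _find_fallback_nic(self):
            return 'eth0'   # stand-in for cloudinit.net.find_fallback_nic()

        @property
        def fallback_interface(self):
            if self._fallback_interface is None:
                # Older objects stored the value in 'fallback_nic'; reuse it.
                legacy = getattr(self, 'fallback_nic', None)
                if legacy:
                    self._fallback_interface = legacy
                    self.fallback_nic = None
                else:
                    self._fallback_interface = self._find_fallback_nic()
            return self._fallback_interface
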
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 88cb7f8..cc55daf 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -19,6 +19,7 @@ import six
 
 from cloudinit import handlers
 from cloudinit import log as logging
+from cloudinit.url_helper import UrlError
 from cloudinit import util
 
 LOG = logging.getLogger(__name__)
@@ -222,16 +223,28 @@ class UserDataProcessor(object):
             if include_once_on and os.path.isfile(include_once_fn):
                 content = util.load_file(include_once_fn)
             else:
-                resp = util.read_file_or_url(include_url,
-                                             ssl_details=self.ssl_details)
-                if include_once_on and resp.ok():
-                    util.write_file(include_once_fn, resp.contents, mode=0o600)
-                if resp.ok():
-                    content = resp.contents
-                else:
-                    LOG.warning(("Fetching from %s resulted in"
-                                 " a invalid http code of %s"),
-                                include_url, resp.code)
+                try:
+                    resp = util.read_file_or_url(include_url,
+                                                 ssl_details=self.ssl_details)
+                    if include_once_on and resp.ok():
+                        util.write_file(include_once_fn, resp.contents,
+                                        mode=0o600)
+                    if resp.ok():
+                        content = resp.contents
+                    else:
+                        LOG.warning(("Fetching from %s resulted in"
+                                     " a invalid http code of %s"),
+                                    include_url, resp.code)
+                except UrlError as urle:
+                    message = str(urle)
+                    # Older versions of requests.exceptions.HTTPError may not
+                    # include the errant url. Append it for clarity in logs.
+                    if include_url not in message:
+                        message += ' for url: {0}'.format(include_url)
+                    LOG.warning(message)
+                except IOError as ioe:
+                    LOG.warning("Fetching from %s resulted in %s",
+                                include_url, ioe)
 
             if content is not None:
                 new_msg = convert_string(content)
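
With the change above, a failed #include fetch is logged as a warning instead of raising out of user-data processing, and the errant URL is appended when the exception text does not already mention it. That message-augmentation step in isolation, with a generic exception and URL standing in for cloudinit.url_helper.UrlError (the helper name is illustrative):

    import logging

    LOG = logging.getLogger(__name__)

    def warn_failed_include(include_url, error):
        """Log a fetch failure, ensuring the URL appears in the message."""
        message = str(error)
        # Older requests.exceptions.HTTPError strings may omit the URL.
        if include_url not in message:
            message += ' for url: {0}'.format(include_url)
        LOG.warning(message)

    warn_failed_include('http://example.com/user-data',
                        Exception('404 Client Error'))
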
diff --git a/debian/changelog b/debian/changelog
index bc6c449..6c01326 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,29 @@
+cloud-init (17.1-41-g76243487-0ubuntu1~17.04.1) zesty-proposed; urgency=medium
+
+  * cherry-pick 1110f30: debian/cloud-init.templates: Fix capitilazation
+    in 'AliYun' (LP: #1728186)
+  * New upstream snapshot (LP: #1733653)
+    - integration test: replace curtin test ppa with cloud-init test ppa.
+    - EC2: Fix bug using fallback_nic and metadata when restoring from cache.
+    - EC2: Kill dhclient process used in sandbox dhclient.
+    - ntp: fix configuration template rendering for openSUSE and SLES
+    - centos: Provide the failed #include url in error messages
+    - Catch UrlError when #include'ing URLs [Andrew Jorgensen]
+    - hosts: Fix openSUSE and SLES setup for /etc/hosts and clarify docs.
+      [Robert Schweikert]
+    - rh_subscription: Perform null checks for enabled and disabled repos.
+      [Dave Mulford]
+    - Improve warning message when a template is not found.
+      [Robert Schweikert]
+    - Replace the temporary i9n.brickies.net with i9n.cloud-init.io.
+    - Azure: don't generate network configuration for SRIOV devices
+    - tests: address some minor feedback missed in last merge.
+    - tests: integration test cleanup and full pass of nocloud-kvm.
+    - Gentoo: chmod +x on all files in sysvinit/gentoo/
+      [Carlos Konstanski]
+
+ -- Chad Smith <chad.smith@xxxxxxxxxxxxx>  Tue, 21 Nov 2017 11:48:32 -0700
+
 cloud-init (17.1-27-geb292c18-0ubuntu1~17.04.1) zesty-proposed; urgency=medium
 
   * New upstream snapshot.
diff --git a/debian/cloud-init.templates b/debian/cloud-init.templates
index 0a251e3..5ed37f7 100644
--- a/debian/cloud-init.templates
+++ b/debian/cloud-init.templates
@@ -1,8 +1,8 @@
 Template: cloud-init/datasources
 Type: multiselect
-Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, Aliyun, Ec2, CloudStack, None
-Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, Aliyun, Ec2, CloudStack, None
-Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com,  SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, Aliyun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, None: Failsafe datasource
+Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, None
+Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, None
+Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com,  SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, None: Failsafe datasource
 Description: Which data sources should be searched?
  Cloud-init supports searching different "Data Sources" for information
  that it uses to configure a cloud instance.
diff --git a/sysvinit/gentoo/cloud-config b/sysvinit/gentoo/cloud-config
old mode 100644
new mode 100755
index 5618472..5618472
--- a/sysvinit/gentoo/cloud-config
+++ b/sysvinit/gentoo/cloud-config
diff --git a/sysvinit/gentoo/cloud-final b/sysvinit/gentoo/cloud-final
old mode 100644
new mode 100755
index a9bf01f..a9bf01f
--- a/sysvinit/gentoo/cloud-final
+++ b/sysvinit/gentoo/cloud-final
diff --git a/sysvinit/gentoo/cloud-init b/sysvinit/gentoo/cloud-init
old mode 100644
new mode 100755
index 531a715..531a715
--- a/sysvinit/gentoo/cloud-init
+++ b/sysvinit/gentoo/cloud-init
diff --git a/sysvinit/gentoo/cloud-init-local b/sysvinit/gentoo/cloud-init-local
old mode 100644
new mode 100755
index 0f8cf65..0f8cf65
--- a/sysvinit/gentoo/cloud-init-local
+++ b/sysvinit/gentoo/cloud-init-local
diff --git a/templates/hosts.opensuse.tmpl b/templates/hosts.opensuse.tmpl
deleted file mode 100644
index 655da3f..0000000
--- a/templates/hosts.opensuse.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-*
-    This file /etc/cloud/templates/hosts.opensuse.tmpl is only utilized
-    if enabled in cloud-config.  Specifically, in order to enable it
-    you need to add the following to config:
-      manage_etc_hosts: True
-*#
-# Your system has configured 'manage_etc_hosts' as True.
-# As a result, if you wish for changes to this file to persist
-# then you will need to either
-# a.) make changes to the master file in
-#     /etc/cloud/templates/hosts.opensuse.tmpl
-# b.) change or remove the value of 'manage_etc_hosts' in
-#     /etc/cloud/cloud.cfg or cloud-config from user-data
-#
-# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 localhost
-
-# The following lines are desirable for IPv6 capable hosts
-::1 localhost ipv6-localhost ipv6-loopback
-fe00::0 ipv6-localnet
-
-ff00::0 ipv6-mcastprefix
-ff02::1 ipv6-allnodes
-ff02::2 ipv6-allrouters
-ff02::3 ipv6-allhosts
-
diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl
index b608269..8e664db 100644
--- a/templates/hosts.suse.tmpl
+++ b/templates/hosts.suse.tmpl
@@ -13,12 +13,18 @@ you need to add the following to config:
 #     /etc/cloud/cloud.cfg or cloud-config from user-data
 #
 # The following lines are desirable for IPv4 capable hosts
-127.0.0.1 localhost
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
 
 # The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost.localdomain localhost
+::1 localhost6.localdomain6 localhost6
 ::1 localhost ipv6-localhost ipv6-loopback
-fe00::0 ipv6-localnet
 
+
+fe00::0 ipv6-localnet
 ff00::0 ipv6-mcastprefix
 ff02::1 ipv6-allnodes
 ff02::2 ipv6-allrouters
diff --git a/templates/ntp.conf.opensuse.tmpl b/templates/ntp.conf.opensuse.tmpl
new file mode 100644
index 0000000..f3ab565
--- /dev/null
+++ b/templates/ntp.conf.opensuse.tmpl
@@ -0,0 +1,88 @@
+## template:jinja
+
+##
+## Radio and modem clocks by convention have addresses in the 
+## form 127.127.t.u, where t is the clock type and u is a unit 
+## number in the range 0-3. 
+##
+## Most of these clocks require support in the form of a 
+## serial port or special bus peripheral. The particular  
+## device is normally specified by adding a soft link 
+## /dev/device-u to the particular hardware device involved, 
+## where u correspond to the unit number above. 
+## 
+## Generic DCF77 clock on serial port (Conrad DCF77)
+## Address:     127.127.8.u
+## Serial Port: /dev/refclock-u
+##  
+## (create soft link /dev/refclock-0 to the particular ttyS?)
+##
+# server 127.127.8.0 mode 5 prefer
+
+##
+## Undisciplined Local Clock. This is a fake driver intended for backup
+## and when no outside source of synchronized time is available.
+##
+# server 127.127.1.0		# local clock (LCL)
+# fudge  127.127.1.0 stratum 10	# LCL is unsynchronized
+
+##
+## Add external Servers using
+## # rcntpd addserver <yourserver>
+## The servers will only be added to the currently running instance, not
+## to /etc/ntp.conf.
+##
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Access control configuration; see /usr/share/doc/packages/ntp/html/accopt.html for
+# details.  The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default notrap nomodify nopeer noquery
+restrict -6 default notrap nomodify nopeer noquery
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+##
+## Miscellaneous stuff
+##
+
+driftfile /var/lib/ntp/drift/ntp.drift # path for drift file
+
+logfile   /var/log/ntp		# alternate log file
+# logconfig =syncstatus + sysevents
+# logconfig =all
+
+# statsdir /tmp/		# directory for statistics files
+# filegen peerstats  file peerstats  type day enable
+# filegen loopstats  file loopstats  type day enable
+# filegen clockstats file clockstats type day enable
+
+#
+# Authentication stuff
+#
+keys /etc/ntp.keys		# path for keys file
+trustedkey 1			# define trusted keys
+requestkey 1			# key (7) for accessing server variables
+controlkey 1			# key (6) for accessing server variables
+
diff --git a/templates/ntp.conf.sles.tmpl b/templates/ntp.conf.sles.tmpl
index 5c5fc4d..f3ab565 100644
--- a/templates/ntp.conf.sles.tmpl
+++ b/templates/ntp.conf.sles.tmpl
@@ -1,17 +1,5 @@
 ## template:jinja
 
-################################################################################
-## /etc/ntp.conf
-##
-## Sample NTP configuration file.
-## See package 'ntp-doc' for documentation, Mini-HOWTO and FAQ.
-## Copyright (c) 1998 S.u.S.E. GmbH Fuerth, Germany.
-##
-## Author: Michael Andres,  <ma@xxxxxxx>
-##         Michael Skibbe,  <mskibbe@xxxxxxx>
-##
-################################################################################
-
 ##
 ## Radio and modem clocks by convention have addresses in the 
 ## form 127.127.t.u, where t is the clock type and u is a unit 
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 4a2422e..71ee764 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -22,11 +22,21 @@ def collect_script(instance, base_dir, script, script_name):
     """
     LOG.debug('running collect script: %s', script_name)
     (out, err, exit) = instance.run_script(
-        script, rcs=range(0, 256),
+        script.encode(), rcs=False,
         description='collect: {}'.format(script_name))
     c_util.write_file(os.path.join(base_dir, script_name), out)
 
 
+def collect_console(instance, base_dir):
+    LOG.debug('getting console log')
+    try:
+        data = instance.console_log()
+    except NotImplementedError as e:
+        data = 'Not Implemented: %s' % e
+    with open(os.path.join(base_dir, 'console.log'), "wb") as fp:
+        fp.write(data)
+
+
 def collect_test_data(args, snapshot, os_name, test_name):
     """Collect data for test case.
 
@@ -79,8 +89,12 @@ def collect_test_data(args, snapshot, os_name, test_name):
                                          test_output_dir, script, script_name))
                          for script_name, script in test_scripts.items()]
 
+        console_log = partial(
+            run_single, 'collect console',
+            partial(collect_console, instance, test_output_dir))
+
         res = run_stage('collect for test: {}'.format(test_name),
-                        [start_call] + collect_calls)
+                        [start_call] + collect_calls + [console_log])
 
     return res
 
diff --git a/tests/cloud_tests/images/base.py b/tests/cloud_tests/images/base.py
index 0a1e056..d503108 100644
--- a/tests/cloud_tests/images/base.py
+++ b/tests/cloud_tests/images/base.py
@@ -2,8 +2,10 @@
 
 """Base class for images."""
 
+from ..util import TargetBase
 
-class Image(object):
+
+class Image(TargetBase):
     """Base class for images."""
 
     platform_name = None
@@ -43,21 +45,6 @@ class Image(object):
         # NOTE: more sophisticated options may be requied at some point
         return self.config.get('setup_overrides', {})
 
-    def execute(self, *args, **kwargs):
-        """Execute command in image, modifying image."""
-        raise NotImplementedError
-
-    def push_file(self, local_path, remote_path):
-        """Copy file at 'local_path' to instance at 'remote_path'."""
-        raise NotImplementedError
-
-    def run_script(self, *args, **kwargs):
-        """Run script in image, modifying image.
-
-        @return_value: script output
-        """
-        raise NotImplementedError
-
     def snapshot(self):
         """Create snapshot of image, block until done."""
         raise NotImplementedError
diff --git a/tests/cloud_tests/images/lxd.py b/tests/cloud_tests/images/lxd.py
index fd4e93c..5caeba4 100644
--- a/tests/cloud_tests/images/lxd.py
+++ b/tests/cloud_tests/images/lxd.py
@@ -24,7 +24,7 @@ class LXDImage(base.Image):
         @param config: image configuration
         """
         self.modified = False
-        self._instance = None
+        self._img_instance = None
         self._pylxd_image = None
         self.pylxd_image = pylxd_image
         super(LXDImage, self).__init__(platform, config)
@@ -38,9 +38,9 @@ class LXDImage(base.Image):
 
     @pylxd_image.setter
     def pylxd_image(self, pylxd_image):
-        if self._instance:
+        if self._img_instance:
             self._instance.destroy()
-            self._instance = None
+            self._img_instance = None
         if (self._pylxd_image and
                 (self._pylxd_image is not pylxd_image) and
                 (not self.config.get('cache_base_image') or self.modified)):
@@ -49,15 +49,19 @@ class LXDImage(base.Image):
         self._pylxd_image = pylxd_image
 
     @property
-    def instance(self):
-        """Property function."""
-        if not self._instance:
-            self._instance = self.platform.launch_container(
+    def _instance(self):
+        """Internal use only, returns an instance
+
+        This starts an lxc instance from the image, so it is "dirty".
+        Better would be some way to modify this "at rest".
+        lxc-pstart would be an option."""
+        if not self._img_instance:
+            self._img_instance = self.platform.launch_container(
                 self.properties, self.config, self.features,
                 use_desc='image-modification', image_desc=str(self),
                 image=self.pylxd_image.fingerprint)
-        self._instance.start()
-        return self._instance
+            self._img_instance.start()
+        return self._img_instance
 
     @property
     def properties(self):
@@ -144,20 +148,20 @@ class LXDImage(base.Image):
             shutil.rmtree(export_dir)
             shutil.rmtree(extract_dir)
 
-    def execute(self, *args, **kwargs):
+    def _execute(self, *args, **kwargs):
         """Execute command in image, modifying image."""
-        return self.instance.execute(*args, **kwargs)
+        return self._instance._execute(*args, **kwargs)
 
     def push_file(self, local_path, remote_path):
         """Copy file at 'local_path' to instance at 'remote_path'."""
-        return self.instance.push_file(local_path, remote_path)
+        return self._instance.push_file(local_path, remote_path)
 
     def run_script(self, *args, **kwargs):
         """Run script in image, modifying image.
 
         @return_value: script output
         """
-        return self.instance.run_script(*args, **kwargs)
+        return self._instance.run_script(*args, **kwargs)
 
     def snapshot(self):
         """Create snapshot of image, block until done."""
@@ -169,7 +173,7 @@ class LXDImage(base.Image):
         # clone current instance
         instance = self.platform.launch_container(
             self.properties, self.config, self.features,
-            container=self.instance.name, image_desc=str(self),
+            container=self._instance.name, image_desc=str(self),
             use_desc='snapshot', container_config=conf)
         # wait for cloud-init before boot_clean_script is run to ensure
         # /var/lib/cloud is removed cleanly
diff --git a/tests/cloud_tests/images/nocloudkvm.py b/tests/cloud_tests/images/nocloudkvm.py
index a7af0e5..1e7962c 100644
--- a/tests/cloud_tests/images/nocloudkvm.py
+++ b/tests/cloud_tests/images/nocloudkvm.py
@@ -2,6 +2,8 @@
 
 """NoCloud KVM Image Base Class."""
 
+from cloudinit import util as c_util
+
 from tests.cloud_tests.images import base
 from tests.cloud_tests.snapshots import nocloudkvm as nocloud_kvm_snapshot
 
@@ -19,24 +21,11 @@ class NoCloudKVMImage(base.Image):
         @param img_path: path to the image
         """
         self.modified = False
-        self._instance = None
         self._img_path = img_path
 
         super(NoCloudKVMImage, self).__init__(platform, config)
 
     @property
-    def instance(self):
-        """Returns an instance of an image."""
-        if not self._instance:
-            if not self._img_path:
-                raise RuntimeError()
-
-            self._instance = self.platform.create_image(
-                self.properties, self.config, self.features, self._img_path,
-                image_desc=str(self), use_desc='image-modification')
-        return self._instance
-
-    @property
     def properties(self):
         """Dictionary containing: 'arch', 'os', 'version', 'release'."""
         return {
@@ -46,20 +35,26 @@ class NoCloudKVMImage(base.Image):
             'version': self.config['version'],
         }
 
-    def execute(self, *args, **kwargs):
+    def _execute(self, command, stdin=None, env=None):
         """Execute command in image, modifying image."""
-        return self.instance.execute(*args, **kwargs)
+        return self.mount_image_callback(command, stdin=stdin, env=env)
 
-    def push_file(self, local_path, remote_path):
-        """Copy file at 'local_path' to instance at 'remote_path'."""
-        return self.instance.push_file(local_path, remote_path)
+    def mount_image_callback(self, command, stdin=None, env=None):
+        """Run mount-image-callback."""
 
-    def run_script(self, *args, **kwargs):
-        """Run script in image, modifying image.
+        env_args = []
+        if env:
+            env_args = ['env'] + ["%s=%s" for k, v in env.items()]
 
-        @return_value: script output
-        """
-        return self.instance.run_script(*args, **kwargs)
+        mic_chroot = ['sudo', 'mount-image-callback', '--system-mounts',
+                      '--system-resolvconf', self._img_path,
+                      '--', 'chroot', '_MOUNTPOINT_']
+        try:
+            out, err = c_util.subp(mic_chroot + env_args + list(command),
+                                   data=stdin, decode=False)
+            return (out, err, 0)
+        except c_util.ProcessExecutionError as e:
+            return (e.stdout, e.stderr, e.exit_code)
 
     def snapshot(self):
         """Create snapshot of image, block until done."""
@@ -82,7 +77,6 @@ class NoCloudKVMImage(base.Image):
         framework decide whether to keep or destroy everything.
         """
         self._img_path = None
-        self._instance.destroy()
         super(NoCloudKVMImage, self).destroy()
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/instances/base.py b/tests/cloud_tests/instances/base.py
index 9bdda60..8c59d62 100644
--- a/tests/cloud_tests/instances/base.py
+++ b/tests/cloud_tests/instances/base.py
@@ -2,8 +2,10 @@
 
 """Base instance."""
 
+from ..util import TargetBase
 
-class Instance(object):
+
+class Instance(TargetBase):
     """Base instance object."""
 
     platform_name = None
@@ -22,82 +24,7 @@ class Instance(object):
         self.properties = properties
         self.config = config
         self.features = features
-
-    def execute(self, command, stdout=None, stderr=None, env=None,
-                rcs=None, description=None):
-        """Execute command in instance, recording output, error and exit code.
-
-        Assumes functional networking and execution as root with the
-        target filesystem being available at /.
-
-        @param command: the command to execute as root inside the image
-            if command is a string, then it will be executed as:
-            ['sh', '-c', command]
-        @param stdout, stderr: file handles to write output and error to
-        @param env: environment variables
-        @param rcs: allowed return codes from command
-        @param description: purpose of command
-        @return_value: tuple containing stdout data, stderr data, exit code
-        """
-        raise NotImplementedError
-
-    def read_data(self, remote_path, decode=False):
-        """Read data from instance filesystem.
-
-        @param remote_path: path in instance
-        @param decode: return as string
-        @return_value: data as str or bytes
-        """
-        raise NotImplementedError
-
-    def write_data(self, remote_path, data):
-        """Write data to instance filesystem.
-
-        @param remote_path: path in instance
-        @param data: data to write, either str or bytes
-        """
-        raise NotImplementedError
-
-    def pull_file(self, remote_path, local_path):
-        """Copy file at 'remote_path', from instance to 'local_path'.
-
-        @param remote_path: path on remote instance
-        @param local_path: path on local instance
-        """
-        with open(local_path, 'wb') as fp:
-            fp.write(self.read_data(remote_path))
-
-    def push_file(self, local_path, remote_path):
-        """Copy file at 'local_path' to instance at 'remote_path'.
-
-        @param local_path: path on local instance
-        @param remote_path: path on remote instance
-        """
-        with open(local_path, 'rb') as fp:
-            self.write_data(remote_path, fp.read())
-
-    def run_script(self, script, rcs=None, description=None):
-        """Run script in target and return stdout.
-
-        @param script: script contents
-        @param rcs: allowed return codes from script
-        @param description: purpose of script
-        @return_value: stdout from script
-        """
-        script_path = self.tmpfile()
-        try:
-            self.write_data(script_path, script)
-            return self.execute(
-                ['/bin/bash', script_path], rcs=rcs, description=description)
-        finally:
-            self.execute(['rm', '-f', script_path], rcs=rcs)
-
-    def tmpfile(self):
-        """Get a tmp file in the target.
-
-        @return_value: path to new file in target
-        """
-        return self.execute(['mktemp'])[0].strip()
+        self._tmp_count = 0
 
     def console_log(self):
         """Instance console.
diff --git a/tests/cloud_tests/instances/lxd.py b/tests/cloud_tests/instances/lxd.py
index a43918c..3b035d8 100644
--- a/tests/cloud_tests/instances/lxd.py
+++ b/tests/cloud_tests/instances/lxd.py
@@ -2,8 +2,11 @@
 
 """Base LXD instance."""
 
-from tests.cloud_tests.instances import base
-from tests.cloud_tests import util
+from . import base
+
+import os
+import shutil
+from tempfile import mkdtemp
 
 
 class LXDInstance(base.Instance):
@@ -24,6 +27,8 @@ class LXDInstance(base.Instance):
         self._pylxd_container = pylxd_container
         super(LXDInstance, self).__init__(
             platform, name, properties, config, features)
+        self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name))
+        self._setup_console_log()
 
     @property
     def pylxd_container(self):
@@ -31,74 +36,69 @@ class LXDInstance(base.Instance):
         self._pylxd_container.sync()
         return self._pylxd_container
 
-    def execute(self, command, stdout=None, stderr=None, env=None,
-                rcs=None, description=None):
-        """Execute command in instance, recording output, error and exit code.
-
-        Assumes functional networking and execution as root with the
-        target filesystem being available at /.
-
-        @param command: the command to execute as root inside the image
-            if command is a string, then it will be executed as:
-            ['sh', '-c', command]
-        @param stdout: file handler to write output
-        @param stderr: file handler to write error
-        @param env: environment variables
-        @param rcs: allowed return codes from command
-        @param description: purpose of command
-        @return_value: tuple containing stdout data, stderr data, exit code
-        """
+    def _setup_console_log(self):
+        logf = os.path.join(self.tmpd, "console.log")
+
+        # doing this ensures we can read it. Otherwise it ends up root:root.
+        with open(logf, "w") as fp:
+            fp.write("# %s\n" % self.name)
+
+        cfg = "lxc.console.logfile=%s" % logf
+        orig = self._pylxd_container.config.get('raw.lxc', "")
+        if orig:
+            orig += "\n"
+        self._pylxd_container.config['raw.lxc'] = orig + cfg
+        self._pylxd_container.save()
+        self._console_log_file = logf
+
+    def _execute(self, command, stdin=None, env=None):
         if env is None:
             env = {}
 
-        if isinstance(command, str):
-            command = ['sh', '-c', command]
+        if stdin is not None:
+            # pylxd does not support input to execute.
+            # https://github.com/lxc/pylxd/issues/244
+            #
+            # The solution here is write a tmp file in the container
+            # and then execute a shell that sets its standard input to
+            # be from that file, removes it, and calls the command.
+            tmpf = self.tmpfile()
+            self.write_data(tmpf, stdin)
+            ncmd = 'exec <"{tmpf}"; rm -f "{tmpf}"; exec "$@"'
+            command = (['sh', '-c', ncmd.format(tmpf=tmpf), 'stdinhack'] +
+                       list(command))
 
         # ensure instance is running and execute the command
         self.start()
+        # execute returns a ContainerExecuteResult, named tuple
+        # (exit_code, stdout, stderr)
         res = self.pylxd_container.execute(command, environment=env)
 
         # get out, exit and err from pylxd return
-        if hasattr(res, 'exit_code'):
-            # pylxd 2.2 returns ContainerExecuteResult, named tuple of
-            # (exit_code, out, err)
-            (exit, out, err) = res
-        else:
+        if not hasattr(res, 'exit_code'):
             # pylxd 2.1.3 and earlier only return out and err, no exit
-            # LOG.warning('using pylxd version < 2.2')
-            (out, err) = res
-            exit = 0
-
-        # write data to file descriptors if needed
-        if stdout:
-            stdout.write(out)
-        if stderr:
-            stderr.write(err)
-
-        # if the command exited with a code not allowed in rcs, then fail
-        if exit not in (rcs if rcs else (0,)):
-            error_desc = ('Failed command to: {}'.format(description)
-                          if description else None)
-            raise util.InTargetExecuteError(
-                out, err, exit, command, self.name, error_desc)
+            raise RuntimeError(
+                "No 'exit_code' in pylxd.container.execute return.\n"
+                "pylxd > 2.2 is required.")
 
-        return (out, err, exit)
+        return res.stdout, res.stderr, res.exit_code
 
     def read_data(self, remote_path, decode=False):
         """Read data from instance filesystem.
 
         @param remote_path: path in instance
-        @param decode: return as string
-        @return_value: data as str or bytes
+        @param decode: decode data before returning.
+        @return_value: content of remote_path as bytes if 'decode' is False,
+                       and as string if 'decode' is True.
         """
         data = self.pylxd_container.files.get(remote_path)
-        return data.decode() if decode and isinstance(data, bytes) else data
+        return data.decode() if decode else data
 
     def write_data(self, remote_path, data):
         """Write data to instance filesystem.
 
         @param remote_path: path in instance
-        @param data: data to write, either str or bytes
+        @param data: data to write in bytes
         """
         self.pylxd_container.files.put(remote_path, data)
 
@@ -107,7 +107,14 @@ class LXDInstance(base.Instance):
 
         @return_value: bytes of this instance’s console
         """
-        raise NotImplementedError
+        if not os.path.exists(self._console_log_file):
+            raise NotImplementedError(
+                "Console log '%s' does not exist. If this is a remote "
+                "lxc, then this is really NotImplementedError.  If it is "
+                "A local lxc, then this is a RuntimeError."
+                "https://github.com/lxc/lxd/issues/1129")
+        with open(self._console_log_file, "rb") as fp:
+            return fp.read()
 
     def reboot(self, wait=True):
         """Reboot instance."""
@@ -144,6 +151,7 @@ class LXDInstance(base.Instance):
         if self.platform.container_exists(self.name):
             raise OSError('container {} was not properly removed'
                           .format(self.name))
+        shutil.rmtree(self.tmpd)
         super(LXDInstance, self).destroy()
 
 # vi: ts=4 expandtab
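
The stdin handling in _execute() above works around pylxd's lack of input support (pylxd issue #244) by pushing the data to a temporary file in the container and prefixing the real command with a small sh prologue that redirects from that file, deletes it, and then execs the original argv. The wrapper construction by itself (the helper name is illustrative; tmpf is whatever tmpfile() returned):

    def wrap_with_stdin_file(command, tmpf):
        """Build the sh argv used when stdin must come from a file."""
        # 'stdinhack' becomes $0 of the inner shell; "$@" is the real command.
        ncmd = 'exec <"{tmpf}"; rm -f "{tmpf}"; exec "$@"'
        return (['sh', '-c', ncmd.format(tmpf=tmpf), 'stdinhack'] +
                list(command))

    # wrap_with_stdin_file(['cat'], '/tmp/in.1234') ->
    # ['sh', '-c', 'exec <"/tmp/in.1234"; rm -f "/tmp/in.1234"; exec "$@"',
    #  'stdinhack', 'cat']
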
diff --git a/tests/cloud_tests/instances/nocloudkvm.py b/tests/cloud_tests/instances/nocloudkvm.py
index 8a0e531..cc82580 100644
--- a/tests/cloud_tests/instances/nocloudkvm.py
+++ b/tests/cloud_tests/instances/nocloudkvm.py
@@ -12,11 +12,18 @@ from cloudinit import util as c_util
 from tests.cloud_tests.instances import base
 from tests.cloud_tests import util
 
+# This domain contains reverse lookups for hostnames that are used.
+# The primary reason is so sudo will return quickly when it attempts
+# to look up the hostname.  i9n is just short for 'integration'.
+# see also bug 1730744 for why we had to do this.
+CI_DOMAIN = "i9n.cloud-init.io"
+
 
 class NoCloudKVMInstance(base.Instance):
     """NoCloud KVM backed instance."""
 
     platform_name = "nocloud-kvm"
+    _ssh_client = None
 
     def __init__(self, platform, name, properties, config, features,
                  user_data, meta_data):
@@ -35,6 +42,7 @@ class NoCloudKVMInstance(base.Instance):
         self.ssh_port = None
         self.pid = None
         self.pid_file = None
+        self.console_file = None
 
         super(NoCloudKVMInstance, self).__init__(
             platform, name, properties, config, features)
@@ -51,43 +59,18 @@ class NoCloudKVMInstance(base.Instance):
             os.remove(self.pid_file)
 
         self.pid = None
-        super(NoCloudKVMInstance, self).destroy()
-
-    def execute(self, command, stdout=None, stderr=None, env=None,
-                rcs=None, description=None):
-        """Execute command in instance.
-
-        Assumes functional networking and execution as root with the
-        target filesystem being available at /.
-
-        @param command: the command to execute as root inside the image
-            if command is a string, then it will be executed as:
-            ['sh', '-c', command]
-        @param stdout, stderr: file handles to write output and error to
-        @param env: environment variables
-        @param rcs: allowed return codes from command
-        @param description: purpose of command
-        @return_value: tuple containing stdout data, stderr data, exit code
-        """
-        if env is None:
-            env = {}
-
-        if isinstance(command, str):
-            command = ['sh', '-c', command]
+        if self._ssh_client:
+            self._ssh_client.close()
+            self._ssh_client = None
 
-        if self.pid:
-            return self.ssh(command)
-        else:
-            return self.mount_image_callback(command) + (0,)
+        super(NoCloudKVMInstance, self).destroy()
 
-    def mount_image_callback(self, cmd):
-        """Run mount-image-callback."""
-        out, err = c_util.subp(['sudo', 'mount-image-callback',
-                                '--system-mounts', '--system-resolvconf',
-                                self.name, '--', 'chroot',
-                                '_MOUNTPOINT_'] + cmd)
+    def _execute(self, command, stdin=None, env=None):
+        env_args = []
+        if env:
+            env_args = ['env'] + ["%s=%s" for k, v in env.items()]
 
-        return out, err
+        return self.ssh(['sudo'] + env_args + list(command), stdin=stdin)
 
     def generate_seed(self, tmpdir):
         """Generate nocloud seed from user-data"""
@@ -109,57 +92,31 @@ class NoCloudKVMInstance(base.Instance):
         s.close()
         return num
 
-    def push_file(self, local_path, remote_path):
-        """Copy file at 'local_path' to instance at 'remote_path'.
-
-        If we have a pid then SSH is up, otherwise, use
-        mount-image-callback.
-
-        @param local_path: path on local instance
-        @param remote_path: path on remote instance
-        """
-        if self.pid:
-            super(NoCloudKVMInstance, self).push_file()
-        else:
-            local_file = open(local_path)
-            p = subprocess.Popen(['sudo', 'mount-image-callback',
-                                  '--system-mounts', '--system-resolvconf',
-                                  self.name, '--', 'chroot', '_MOUNTPOINT_',
-                                  '/bin/sh', '-c', 'cat - > %s' % remote_path],
-                                 stdin=local_file,
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.PIPE)
-            p.wait()
-
-    def sftp_put(self, path, data):
-        """SFTP put a file."""
-        client = self._ssh_connect()
-        sftp = client.open_sftp()
-
-        with sftp.open(path, 'w') as f:
-            f.write(data)
-
-        client.close()
-
-    def ssh(self, command):
+    def ssh(self, command, stdin=None):
         """Run a command via SSH."""
         client = self._ssh_connect()
 
+        cmd = util.shell_pack(command)
         try:
-            _, out, err = client.exec_command(util.shell_pack(command))
-        except paramiko.SSHException:
-            raise util.InTargetExecuteError('', '', -1, command, self.name)
-
-        exit = out.channel.recv_exit_status()
-        out = ''.join(out.readlines())
-        err = ''.join(err.readlines())
-        client.close()
-
-        return out, err, exit
+            fp_in, fp_out, fp_err = client.exec_command(cmd)
+            channel = fp_in.channel
+            if stdin is not None:
+                fp_in.write(stdin)
+                fp_in.close()
+
+            channel.shutdown_write()
+            rc = channel.recv_exit_status()
+            return (fp_out.read(), fp_err.read(), rc)
+        except paramiko.SSHException as e:
+            raise util.InTargetExecuteError(
+                b'', b'', -1, command, self.name, reason=e)
 
     def _ssh_connect(self, hostname='localhost', username='ubuntu',
                      banner_timeout=120, retry_attempts=30):
         """Connect via SSH."""
+        if self._ssh_client:
+            return self._ssh_client
+
         private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
         client = paramiko.SSHClient()
         client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
@@ -168,6 +125,7 @@ class NoCloudKVMInstance(base.Instance):
                 client.connect(hostname=hostname, username=username,
                                port=self.ssh_port, pkey=private_key,
                                banner_timeout=banner_timeout)
+                self._ssh_client = client
                 return client
             except (paramiko.SSHException, TypeError):
                 time.sleep(1)
@@ -183,15 +141,19 @@ class NoCloudKVMInstance(base.Instance):
         tmpdir = self.platform.config['data_dir']
         seed = self.generate_seed(tmpdir)
         self.pid_file = os.path.join(tmpdir, '%s.pid' % self.name)
+        self.console_file = os.path.join(tmpdir, '%s-console.log' % self.name)
         self.ssh_port = self.get_free_port()
 
-        subprocess.Popen(['./tools/xkvm',
-                          '--disk', '%s,cache=unsafe' % self.name,
-                          '--disk', '%s,cache=unsafe' % seed,
-                          '--netdev',
-                          'user,hostfwd=tcp::%s-:22' % self.ssh_port,
-                          '--', '-pidfile', self.pid_file, '-vnc', 'none',
-                          '-m', '2G', '-smp', '2'],
+        cmd = ['./tools/xkvm',
+               '--disk', '%s,cache=unsafe' % self.name,
+               '--disk', '%s,cache=unsafe' % seed,
+               '--netdev', ','.join(['user',
+                                     'hostfwd=tcp::%s-:22' % self.ssh_port,
+                                     'dnssearch=%s' % CI_DOMAIN]),
+               '--', '-pidfile', self.pid_file, '-vnc', 'none',
+               '-m', '2G', '-smp', '2', '-nographic',
+               '-serial', 'file:' + self.console_file]
+        subprocess.Popen(cmd,
                          close_fds=True,
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
@@ -206,12 +168,10 @@ class NoCloudKVMInstance(base.Instance):
         if wait:
             self._wait_for_system(wait_for_cloud_init)
 
-    def write_data(self, remote_path, data):
-        """Write data to instance filesystem.
-
-        @param remote_path: path in instance
-        @param data: data to write, either str or bytes
-        """
-        self.sftp_put(remote_path, data)
+    def console_log(self):
+        if not self.console_file:
+            return b''
+        with open(self.console_file, "rb") as fp:
+            return fp.read()
 
 # vi: ts=4 expandtab
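
Reviewer note on the paramiko pattern used by the reworked ssh() method above:
exec_command() hands back file objects for the remote command's stdin, stdout
and stderr, and the exit status is read from the underlying channel once writes
are shut down. A minimal sketch of that flow, using a hypothetical helper name
that is not part of this branch:

    def run_over_ssh(client, packed_cmd, stdin_bytes=None):
        # client: an already-connected paramiko.SSHClient
        # packed_cmd: a single shell string, e.g. from util.shell_pack()
        fp_in, fp_out, fp_err = client.exec_command(packed_cmd)
        if stdin_bytes is not None:
            fp_in.write(stdin_bytes)    # feed stdin to the remote command
            fp_in.close()
        fp_in.channel.shutdown_write()  # signal EOF so the command can finish
        rc = fp_in.channel.recv_exit_status()
        return fp_out.read(), fp_err.read(), rc
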
diff --git a/tests/cloud_tests/testcases/examples/run_commands.yaml b/tests/cloud_tests/testcases/examples/run_commands.yaml
index b0e311b..f80eb8c 100644
--- a/tests/cloud_tests/testcases/examples/run_commands.yaml
+++ b/tests/cloud_tests/testcases/examples/run_commands.yaml
@@ -7,10 +7,10 @@ enabled: False
 cloud_config: |
   #cloud-config
   runcmd:
-   - echo cloud-init run cmd test > /tmp/run_cmd
+   - echo cloud-init run cmd test > /var/tmp/run_cmd
 collect_scripts:
   run_cmd: |
     #!/bin/bash
-    cat /tmp/run_cmd
+    cat /var/tmp/run_cmd
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
index d299e9a..dfbdead 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
@@ -11,13 +11,13 @@ class TestAptconfigureSourcesPPA(base.CloudTestCase):
         """Test specific ppa added."""
         out = self.get_data_file('sources.list')
         self.assertIn(
-            'http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu', out)
+            'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
 
     def test_ppa_key(self):
         """Test ppa key added."""
         out = self.get_data_file('apt-key')
         self.assertIn(
-            '1BC3 0F71 5A3B 8612 47A8  1A5E 55FE 7C8C 0165 013E', out)
-        self.assertIn('Launchpad PPA for curtin developers', out)
+            '1FF0 D853 5EF7 E719 E5C8  1B9C 083D 06FB E4D3 04DF', out)
+        self.assertIn('Launchpad PPA for cloud init development team', out)
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
index 9efdae5..b997bcf 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
+++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
@@ -2,7 +2,7 @@
 # Add a PPA to source.list
 #
 # NOTE: on older ubuntu releases the sources file added is named
-#       'curtin-dev-test-archive-trusty', without 'ubuntu' in the middle
+#       'cloud-init-dev-test-archive-trusty', without 'ubuntu' in the middle
 required_features:
   - apt
   - ppa
@@ -14,11 +14,11 @@ cloud_config: |
       source1:
         keyid: 0165013E
         keyserver: keyserver.ubuntu.com
-        source: "ppa:curtin-dev/test-archive"
+        source: "ppa:cloud-init-dev/test-archive"
 collect_scripts:
   sources.list: |
     #!/bin/bash
-    cat /etc/apt/sources.list.d/curtin-dev-ubuntu-test-archive-*.list
+    cat /etc/apt/sources.list.d/cloud-init-dev-ubuntu-test-archive-*.list
   apt-key: |
     #!/bin/bash
     apt-key finger
diff --git a/tests/cloud_tests/testcases/modules/keys_to_console.py b/tests/cloud_tests/testcases/modules/keys_to_console.py
index 88b6812..07f3811 100644
--- a/tests/cloud_tests/testcases/modules/keys_to_console.py
+++ b/tests/cloud_tests/testcases/modules/keys_to_console.py
@@ -10,13 +10,13 @@ class TestKeysToConsole(base.CloudTestCase):
     def test_excluded_keys(self):
         """Test excluded keys missing."""
         out = self.get_data_file('syslog')
-        self.assertNotIn('DSA', out)
-        self.assertNotIn('ECDSA', out)
+        self.assertNotIn('(DSA)', out)
+        self.assertNotIn('(ECDSA)', out)
 
     def test_expected_keys(self):
         """Test expected keys exist."""
         out = self.get_data_file('syslog')
-        self.assertIn('ED25519', out)
-        self.assertIn('RSA', out)
+        self.assertIn('(ED25519)', out)
+        self.assertIn('(RSA)', out)
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/runcmd.yaml b/tests/cloud_tests/testcases/modules/runcmd.yaml
index 04e5a05..8309a88 100644
--- a/tests/cloud_tests/testcases/modules/runcmd.yaml
+++ b/tests/cloud_tests/testcases/modules/runcmd.yaml
@@ -4,10 +4,10 @@
 cloud_config: |
   #cloud-config
   runcmd:
-   - echo cloud-init run cmd test > /tmp/run_cmd
+   - echo cloud-init run cmd test > /var/tmp/run_cmd
 collect_scripts:
   run_cmd: |
     #!/bin/bash
-    cat /tmp/run_cmd
+    cat /var/tmp/run_cmd
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.py b/tests/cloud_tests/testcases/modules/set_hostname.py
index 6e96a75..1dbe64c 100644
--- a/tests/cloud_tests/testcases/modules/set_hostname.py
+++ b/tests/cloud_tests/testcases/modules/set_hostname.py
@@ -7,9 +7,11 @@ from tests.cloud_tests.testcases import base
 class TestHostname(base.CloudTestCase):
     """Test hostname module."""
 
+    ex_hostname = "cloudinit2"
+
     def test_hostname(self):
         """Test hostname command shows correct output."""
         out = self.get_data_file('hostname')
-        self.assertIn('myhostname', out)
+        self.assertIn(self.ex_hostname, out)
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.yaml b/tests/cloud_tests/testcases/modules/set_hostname.yaml
index c96344c..071fb22 100644
--- a/tests/cloud_tests/testcases/modules/set_hostname.yaml
+++ b/tests/cloud_tests/testcases/modules/set_hostname.yaml
@@ -5,7 +5,8 @@ required_features:
   - hostname
 cloud_config: |
   #cloud-config
-  hostname: myhostname
+  hostname: cloudinit2
+
 collect_scripts:
   hosts: |
     #!/bin/bash
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
index 398f3d4..eb6f065 100644
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
+++ b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
@@ -1,26 +1,31 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 """cloud-init Integration Test Verify Script."""
+from tests.cloud_tests.instances.nocloudkvm import CI_DOMAIN
 from tests.cloud_tests.testcases import base
 
 
 class TestHostnameFqdn(base.CloudTestCase):
     """Test Hostname module."""
 
+    ex_hostname = "cloudinit1"
+    ex_fqdn = "cloudinit2." + CI_DOMAIN
+
     def test_hostname(self):
         """Test hostname output."""
         out = self.get_data_file('hostname')
-        self.assertIn('myhostname', out)
+        self.assertIn(self.ex_hostname, out)
 
     def test_hostname_fqdn(self):
         """Test hostname fqdn output."""
         out = self.get_data_file('fqdn')
-        self.assertIn('host.myorg.com', out)
+        self.assertIn(self.ex_fqdn, out)
 
     def test_hosts(self):
         """Test /etc/hosts file."""
         out = self.get_data_file('hosts')
-        self.assertIn('127.0.1.1 host.myorg.com myhostname', out)
+        self.assertIn('127.0.1.1 %s %s' % (self.ex_fqdn, self.ex_hostname),
+                      out)
         self.assertIn('127.0.0.1 localhost', out)
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
index daf7593..a85ee79 100644
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
+++ b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
@@ -6,8 +6,9 @@ required_features:
 cloud_config: |
   #cloud-config
   manage_etc_hosts: true
-  hostname: myhostname
-  fqdn: host.myorg.com
+  hostname: cloudinit1
+  # This needs changing if CI_DOMAIN is updated.
+  fqdn: cloudinit2.i9n.cloud-init.io
 collect_scripts:
   hosts: |
     #!/bin/bash
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.py b/tests/cloud_tests/testcases/modules/set_password_expire.py
index a1c3aa0..967aca7 100644
--- a/tests/cloud_tests/testcases/modules/set_password_expire.py
+++ b/tests/cloud_tests/testcases/modules/set_password_expire.py
@@ -18,6 +18,6 @@ class TestPasswordExpire(base.CloudTestCase):
     def test_sshd_config(self):
         """Test sshd config allows passwords."""
         out = self.get_data_file('sshd_config')
-        self.assertIn('PasswordAuthentication no', out)
+        self.assertIn('PasswordAuthentication yes', out)
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.yaml b/tests/cloud_tests/testcases/modules/set_password_expire.yaml
index 789604b..ba6344b 100644
--- a/tests/cloud_tests/testcases/modules/set_password_expire.yaml
+++ b/tests/cloud_tests/testcases/modules/set_password_expire.yaml
@@ -6,7 +6,9 @@ required_features:
 cloud_config: |
   #cloud-config
   chpasswd: { expire: True }
+  ssh_pwauth: yes
   users:
+    - default
     - name: tom
       password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
       lock_passwd: false
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.yaml b/tests/cloud_tests/testcases/modules/set_password_list.yaml
index a2a89c9..fd3e1e4 100644
--- a/tests/cloud_tests/testcases/modules/set_password_list.yaml
+++ b/tests/cloud_tests/testcases/modules/set_password_list.yaml
@@ -5,6 +5,7 @@ cloud_config: |
   #cloud-config
   ssh_pwauth: yes
   users:
+    - default
     - name: tom
       # md5 gotomgo
       passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.yaml b/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
index c2a0f63..e9fe54b 100644
--- a/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
+++ b/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
@@ -5,6 +5,7 @@ cloud_config: |
   #cloud-config
   ssh_pwauth: yes
   users:
+    - default
     - name: tom
       # md5 gotomgo
       passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
index 8222321..e7329d4 100644
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
+++ b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
@@ -13,12 +13,4 @@ class TestSshKeyFingerprintsDisable(base.CloudTestCase):
         self.assertIn('Skipping module named ssh-authkey-fingerprints, '
                       'logging of ssh fingerprints disabled', out)
 
-    def test_syslog(self):
-        """Verify output of syslog."""
-        out = self.get_data_file('syslog')
-        self.assertNotRegex(out, r'256 SHA256:.*(ECDSA)')
-        self.assertNotRegex(out, r'256 SHA256:.*(ED25519)')
-        self.assertNotRegex(out, r'1024 SHA256:.*(DSA)')
-        self.assertNotRegex(out, r'2048 SHA256:.*(RSA)')
-
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
index 746653e..d93893e 100644
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
+++ b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
@@ -5,7 +5,6 @@ required_features:
   - syslog
 cloud_config: |
   #cloud-config
-  ssh_genkeytypes: []
   no_ssh_fingerprints: true
 collect_scripts:
   syslog: |
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py b/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
index fd6d9ba..b68f556 100644
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
+++ b/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
@@ -9,11 +9,6 @@ class TestSshKeysGenerate(base.CloudTestCase):
 
     # TODO: Check cloud-init-output for the correct keys being generated
 
-    def test_ubuntu_authorized_keys(self):
-        """Test passed in key is not in list for ubuntu."""
-        out = self.get_data_file('auth_keys_ubuntu')
-        self.assertEqual('', out)
-
     def test_dsa_public(self):
         """Test dsa public key not generated."""
         out = self.get_data_file('dsa_public')
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml b/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
index 659fd93..0a7adf6 100644
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
+++ b/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
@@ -10,12 +10,6 @@ cloud_config: |
   - ed25519
   authkey_hash: sha512
 collect_scripts:
-  auth_keys_root: |
-    #!/bin/bash
-    cat /root/.ssh/authorized_keys
-  auth_keys_ubuntu: |
-    #!/bin/bash
-    cat /home/ubuntu/ssh/authorized_keys
   dsa_public: |
     #!/bin/bash
     cat /etc/ssh/ssh_host_dsa_key.pub
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py b/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
index 544649d..add3f46 100644
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
+++ b/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
@@ -7,17 +7,6 @@ from tests.cloud_tests.testcases import base
 class TestSshKeysProvided(base.CloudTestCase):
     """Test ssh keys module."""
 
-    def test_ubuntu_authorized_keys(self):
-        """Test passed in key is not in list for ubuntu."""
-        out = self.get_data_file('auth_keys_ubuntu')
-        self.assertEqual('', out)
-
-    def test_root_authorized_keys(self):
-        """Test passed in key is in authorized list for root."""
-        out = self.get_data_file('auth_keys_root')
-        self.assertIn('lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs50'
-                      '6oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==', out)
-
     def test_dsa_public(self):
         """Test dsa public key passed in."""
         out = self.get_data_file('dsa_public')
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml b/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
index 5ceb362..41f6355 100644
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
+++ b/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
@@ -71,12 +71,6 @@ cloud_config: |
       -----END EC PRIVATE KEY-----
     ecdsa_public: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFsS5Tvky/IC/dXhE/afxxUG6kdQOvdQJCYGZN42OZqWasYF+L3IG+3/wrV7jOrNrL3AyagHl6+lpPDiSXDMcpQ= root@xenial-lxd
 collect_scripts:
-  auth_keys_root: |
-    #!/bin/bash
-    cat /root/.ssh/authorized_keys
-  auth_keys_ubuntu: |
-    #!/bin/bash
-    cat /home/ubuntu/ssh/authorized_keys
   dsa_public: |
     #!/bin/bash
     cat /etc/ssh/ssh_host_dsa_key.pub
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index 4357fbb..c5cd697 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -7,6 +7,7 @@ import copy
 import glob
 import os
 import random
+import shlex
 import shutil
 import string
 import subprocess
@@ -285,20 +286,165 @@ def shell_pack(cmd):
     return 'eval set -- "$(echo %s | base64 --decode)" && exec "$@"' % b64
 
 
+def shell_quote(cmd):
+    if isinstance(cmd, (tuple, list)):
+        return ' '.join([shlex.quote(x) for x in cmd])
+    return shlex.quote(cmd)
+
+
+class TargetBase(object):
+    _tmp_count = 0
+
+    def execute(self, command, stdin=None, env=None,
+                rcs=None, description=None):
+        """Execute command in instance, recording output, error and exit code.
+
+        Assumes functional networking and execution as root with the
+        target filesystem being available at /.
+
+        @param command: the command to execute as root inside the image
+            if command is a string, then it will be executed as:
+            ['sh', '-c', command]
+        @param stdin: bytes content for standard in
+        @param env: environment variables
+        @param rcs: return codes.
+                    None (default): non-zero exit code will raise exception.
+                    False: any rc is allowed (no exception raised).
+                    list of int: any rc not in the list will raise exception.
+        @param description: purpose of command
+        @return_value: tuple containing stdout data, stderr data, exit code
+        """
+        if isinstance(command, str):
+            command = ['sh', '-c', command]
+
+        if rcs is None:
+            rcs = (0,)
+
+        if description:
+            LOG.debug('Executing "%s"', description)
+        else:
+            LOG.debug("Executing command: %s", shell_quote(command))
+
+        out, err, rc = self._execute(command=command, stdin=stdin, env=env)
+
+        # False means accept anything.
+        if (rcs is False or rc in rcs):
+            return out, err, rc
+
+        raise InTargetExecuteError(out, err, rc, command, description)
+
+    def _execute(self, command, stdin=None, env=None):
+        """Execute command in inside, return stdout, stderr and exit code.
+
+        Assumes functional networking and execution as root with the
+        target filesystem being available at /.
+
+        @param stdin: bytes content for standard in
+        @param env: environment variables
+        @return_value: tuple containing stdout data, stderr data, exit code
+
+        This is intended to be implemented by the Image or Instance.
+        Many callers will use the higher level 'execute'."""
+        raise NotImplementedError("_execute must be implemented by subclass.")
+
+    def read_data(self, remote_path, decode=False):
+        """Read data from instance filesystem.
+
+        @param remote_path: path in instance
+        @param decode: decode data before returning.
+        @return_value: content of remote_path as bytes if 'decode' is False,
+                       and as string if 'decode' is True.
+        """
+        # when sh is invoked with '-c', then the first argument is "$0"
+        # which is commonly understood as the "program name".
+        # 'read_data' is the program name, and 'remote_path' is '$1'
+        stdout, stderr, rc = self._execute(
+            ["sh", "-c", 'exec cat "$1"', 'read_data', remote_path])
+        if rc != 0:
+            raise RuntimeError("Failed to read file '%s'" % remote_path)
+
+        if decode:
+            return stdout.decode()
+        return stdout
+
+    def write_data(self, remote_path, data):
+        """Write data to instance filesystem.
+
+        @param remote_path: path in instance
+        @param data: data to write in bytes
+        """
+        # when sh is invoked with '-c', then the first argument is "$0"
+        # which is commonly understood as the "program name".
+        # 'write_data' is the program name, and 'remote_path' is '$1'
+        _, _, rc = self._execute(
+            ["sh", "-c", 'exec cat >"$1"', 'write_data', remote_path],
+            stdin=data)
+
+        if rc != 0:
+            raise RuntimeError("Failed to write to '%s'" % remote_path)
+        return
+
+    def pull_file(self, remote_path, local_path):
+        """Copy file at 'remote_path', from instance to 'local_path'.
+
+        @param remote_path: path on remote instance
+        @param local_path: path on local instance
+        """
+        with open(local_path, 'wb') as fp:
+            fp.write(self.read_data(remote_path))
+
+    def push_file(self, local_path, remote_path):
+        """Copy file at 'local_path' to instance at 'remote_path'.
+
+        @param local_path: path on local instance
+        @param remote_path: path on remote instance"""
+        with open(local_path, "rb") as fp:
+            self.write_data(remote_path, data=fp.read())
+
+    def run_script(self, script, rcs=None, description=None):
+        """Run script in target and return stdout.
+
+        @param script: script contents
+        @param rcs: allowed return codes from script
+        @param description: purpose of script
+        @return_value: stdout from script
+        """
+        # Just write to a file, add execute, run it, then remove it.
+        shblob = '; '.join((
+            'set -e',
+            's="$1"',
+            'shift',
+            'cat > "$s"',
+            'trap "rm -f $s" EXIT',
+            'chmod +x "$s"',
+            '"$s" "$@"'))
+        return self.execute(
+            ['sh', '-c', shblob, 'runscript', self.tmpfile()],
+            stdin=script, description=description, rcs=rcs)
+
+    def tmpfile(self):
+        """Get a tmp file in the target.
+
+        @return_value: path to new file in target
+        """
+        path = "/tmp/%s-%04d" % (type(self).__name__, self._tmp_count)
+        self._tmp_count += 1
+        return path
+
+
 class InTargetExecuteError(c_util.ProcessExecutionError):
     """Error type for in target commands that fail."""
 
-    default_desc = 'Unexpected error while running command in target instance'
+    default_desc = 'Unexpected error while running command.'
 
-    def __init__(self, stdout, stderr, exit_code, cmd, instance,
-                 description=None):
+    def __init__(self, stdout, stderr, exit_code, cmd, description=None,
+                 reason=None):
         """Init error and parent error class."""
-        if isinstance(cmd, (tuple, list)):
-            cmd = ' '.join(cmd)
         super(InTargetExecuteError, self).__init__(
-            stdout=stdout, stderr=stderr, exit_code=exit_code, cmd=cmd,
-            reason="Instance: {}".format(instance),
-            description=description if description else self.default_desc)
+            stdout=stdout, stderr=stderr, exit_code=exit_code,
+            cmd=shell_quote(cmd),
+            description=description if description else self.default_desc,
+            reason=reason)
 
 
 class TempDir(object):
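
Reviewer note: the TargetBase.read_data()/write_data() helpers added above rely
on an 'sh -c' convention that is easy to misread: the first word after the -c
script becomes "$0" (a program-name label) and later words become "$1", "$2",
and so on. A hedged illustration, with example paths only:

    # Roughly what read_data('/etc/hostname') runs on the target:
    #   sh -c 'exec cat "$1"' read_data /etc/hostname
    # 'read_data' is only $0 (a label); /etc/hostname is $1, the file cat'ed.
    cmd = ['sh', '-c', 'exec cat "$1"', 'read_data', '/etc/hostname']

    # write_data(path, data) is the mirror image, with the data fed on stdin:
    #   sh -c 'exec cat >"$1"' write_data /etc/hostname    (stdin=data)
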
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 6d621d2..275b16d 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -18,6 +18,8 @@ from email.mime.application import MIMEApplication
 from email.mime.base import MIMEBase
 from email.mime.multipart import MIMEMultipart
 
+import httpretty
+
 from cloudinit import handlers
 from cloudinit import helpers as c_helpers
 from cloudinit import log
@@ -522,6 +524,54 @@ c: 4
         self.assertEqual(cfg.get('password'), 'gocubs')
         self.assertEqual(cfg.get('locale'), 'chicago')
 
+    @httpretty.activate
+    @mock.patch('cloudinit.url_helper.time.sleep')
+    def test_include(self, mock_sleep):
+        """Test #include."""
+        included_url = 'http://hostname/path'
+        included_data = '#cloud-config\nincluded: true\n'
+        httpretty.register_uri(httpretty.GET, included_url, included_data)
+
+        blob = '#include\n%s\n' % included_url
+
+        self.reRoot()
+        ci = stages.Init()
+        ci.datasource = FakeDataSource(blob)
+        ci.fetch()
+        ci.consume_data()
+        cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
+        cc = util.load_yaml(cc_contents)
+        self.assertTrue(cc.get('included'))
+
+    @httpretty.activate
+    @mock.patch('cloudinit.url_helper.time.sleep')
+    def test_include_bad_url(self, mock_sleep):
+        """Test #include with a bad URL."""
+        bad_url = 'http://bad/forbidden'
+        bad_data = '#cloud-config\nbad: true\n'
+        httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
+
+        included_url = 'http://hostname/path'
+        included_data = '#cloud-config\nincluded: true\n'
+        httpretty.register_uri(httpretty.GET, included_url, included_data)
+
+        blob = '#include\n%s\n%s' % (bad_url, included_url)
+
+        self.reRoot()
+        ci = stages.Init()
+        ci.datasource = FakeDataSource(blob)
+        log_file = self.capture_log(logging.WARNING)
+        ci.fetch()
+        ci.consume_data()
+
+        self.assertIn("403 Client Error: Forbidden for url: %s" % bad_url,
+                      log_file.getvalue())
+
+        cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
+        cc = util.load_yaml(cc_contents)
+        self.assertIsNone(cc.get('bad'))
+        self.assertTrue(cc.get('included'))
+
 
 class TestUDProcess(helpers.ResourceUsingTestCase):
 
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index 6af699a..ba328ee 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -307,6 +307,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
 
     @httpretty.activate
     @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+    def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
+        """Refresh the network_config Ec2 cache if network key is absent.
+
+        This catches an upgrade issue where obj.pkl contained stale metadata
+        which lacked the newly required network key.
+        """
+        old_metadata = copy.deepcopy(DEFAULT_METADATA)
+        old_metadata.pop('network')
+        ds = self._setup_ds(
+            platform_data=self.valid_platform_data,
+            sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
+            md=old_metadata)
+        self.assertTrue(ds.get_data())
+        # Provide new revision of metadata that contains network data
+        register_mock_metaserver(
+            'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA)
+        mac1 = '06:17:04:d7:26:09'  # Defined in DEFAULT_METADATA
+        get_interface_mac_path = (
+            'cloudinit.sources.DataSourceEc2.net.get_interface_mac')
+        ds.fallback_nic = 'eth9'
+        with mock.patch(get_interface_mac_path) as m_get_interface_mac:
+            m_get_interface_mac.return_value = mac1
+            ds.network_config  # Will re-crawl network metadata
+        self.assertIn('Re-crawl of metadata service', self.logs.getvalue())
+        expected = {'version': 1, 'config': [
+            {'mac_address': '06:17:04:d7:26:09',
+             'name': 'eth9',
+             'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}],
+             'type': 'physical'}]}
+        self.assertEqual(expected, ds.network_config)
+
+    @httpretty.activate
+    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
     def test_valid_platform_with_strict_true(self, m_dhcp):
         """Valid platform data should return true with strict_id true."""
         ds = self._setup_ds(
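
Reviewer note: the new test above exercises a cache-refresh path: a datasource
restored from an older obj.pkl may lack the 'network' metadata key, so
accessing network_config re-crawls the metadata service before building the
config. A generic sketch of that pattern (names assumed; this is not
cloud-init's actual DataSourceEc2 implementation):

    class ExampleDataSource(object):
        def __init__(self, crawl):
            self.metadata = {}          # possibly restored from a stale cache
            self._network_config = None
            self._crawl = crawl         # callable returning fresh metadata

        @property
        def network_config(self):
            if self._network_config is None:
                if 'network' not in self.metadata:
                    # upgraded instance with a stale cache: re-crawl first
                    self.metadata = self._crawl()
                interfaces = self.metadata['network'].get('interfaces', [])
                self._network_config = {'version': 1,
                                        'config': list(interfaces)}
            return self._network_config
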
diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py
new file mode 100644
index 0000000..ced05a8
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_etc_hosts.py
@@ -0,0 +1,69 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.config import cc_update_etc_hosts
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.tests import helpers as t_help
+
+import logging
+import os
+import shutil
+
+LOG = logging.getLogger(__name__)
+
+
+class TestHostsFile(t_help.FilesystemMockingTestCase):
+    def setUp(self):
+        super(TestHostsFile, self).setUp()
+        self.tmp = self.tmp_dir()
+
+    def _fetch_distro(self, kind):
+        cls = distros.fetch(kind)
+        paths = helpers.Paths({})
+        return cls(kind, {}, paths)
+
+    def test_write_etc_hosts_suse_localhost(self):
+        cfg = {
+            'manage_etc_hosts': 'localhost',
+            'hostname': 'cloud-init.test.us'
+        }
+        os.makedirs('%s/etc/' % self.tmp)
+        hosts_content = '192.168.1.1 blah.blah.us blah\n'
+        fout = open('%s/etc/hosts' % self.tmp, 'w')
+        fout.write(hosts_content)
+        fout.close()
+        distro = self._fetch_distro('sles')
+        distro.hosts_fn = '%s/etc/hosts' % self.tmp
+        paths = helpers.Paths({})
+        ds = None
+        cc = cloud.Cloud(ds, paths, {}, distro, None)
+        self.patchUtils(self.tmp)
+        cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
+        contents = util.load_file('%s/etc/hosts' % self.tmp)
+        if '127.0.0.1\tcloud-init.test.us\tcloud-init' not in contents:
+            self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
+        if '192.168.1.1\tblah.blah.us\tblah' not in contents:
+            self.assertIsNone('Default etc/hosts content modified')
+
+    def test_write_etc_hosts_suse_template(self):
+        cfg = {
+            'manage_etc_hosts': 'template',
+            'hostname': 'cloud-init.test.us'
+        }
+        shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp)
+        distro = self._fetch_distro('sles')
+        paths = helpers.Paths({})
+        paths.template_tpl = self.tmp + '/etc/cloud/templates/%s.tmpl'
+        ds = None
+        cc = cloud.Cloud(ds, paths, {}, distro, None)
+        self.patchUtils(self.tmp)
+        cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
+        contents = util.load_file('%s/etc/hosts' % self.tmp)
+        if '127.0.0.1 cloud-init.test.us cloud-init' not in contents:
+            self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
+        if '::1 cloud-init.test.us cloud-init' not in contents:
+            self.assertIsNone('No entry for ::1 in etc/hosts')
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 3abe578..28a8455 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -430,5 +430,31 @@ class TestNtp(FilesystemMockingTestCase):
             "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
             content.decode())
 
+    def test_write_ntp_config_template_defaults_pools_empty_lists_sles(self):
+        """write_ntp_config_template defaults pools servers upon empty config.
+
+        When both pools and servers are empty, default NR_POOL_SERVERS get
+        configured.
+        """
+        distro = 'sles'
+        mycloud = self._get_cloud(distro)
+        ntp_conf = self.tmp_path('ntp.conf', self.new_root)  # Doesn't exist
+        # Create ntp.conf.tmpl
+        with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
+            stream.write(NTP_TEMPLATE)
+        with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
+            cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf)
+        content = util.read_file_or_url('file://' + ntp_conf).contents
+        default_pools = [
+            "{0}.opensuse.pool.ntp.org".format(x)
+            for x in range(0, cc_ntp.NR_POOL_SERVERS)]
+        self.assertEqual(
+            "servers []\npools {0}\n".format(default_pools),
+            content.decode())
+        self.assertIn(
+            "Adding distro default ntp pool servers: {0}".format(
+                ",".join(default_pools)),
+            self.logs.getvalue())
+
 
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
index e9d5702..2271810 100644
--- a/tests/unittests/test_rh_subscription.py
+++ b/tests/unittests/test_rh_subscription.py
@@ -2,6 +2,7 @@
 
 """Tests for registering RHEL subscription via rh_subscription."""
 
+import copy
 import logging
 
 from cloudinit.config import cc_rh_subscription
@@ -68,6 +69,20 @@ class GoodTests(TestCase):
         self.assertEqual(self.SM.log_success.call_count, 1)
         self.assertEqual(self.SM._sub_man_cli.call_count, 2)
 
+    @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos")
+    @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_sub_man_cli")
+    def test_update_repos_disable_with_none(self, m_sub_man_cli, m_get_repos):
+        cfg = copy.deepcopy(self.config)
+        m_get_repos.return_value = ([], ['repo1'])
+        m_sub_man_cli.return_value = (b'', b'')
+        cfg['rh_subscription'].update(
+            {'enable-repo': ['repo1'], 'disable-repo': None})
+        mysm = cc_rh_subscription.SubscriptionManager(cfg)
+        self.assertEqual(True, mysm.update_repos())
+        m_get_repos.assert_called_with()
+        self.assertEqual(m_sub_man_cli.call_args_list,
+                         [mock.call(['repos', '--enable=repo1'])])
+
     def test_full_registration(self):
         '''
         Registration with auto-attach, service-level, adding pools,
