← Back to team overview

cloud-init-dev team mailing list archive

[Merge] ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

 

Chad Smith has proposed merging ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.

Commit message:
new upstream snapshot for upload into Disco. Note changed series represented in debian/changelog

Requested reviews:
  cloud-init committers (cloud-init-dev)
Related bugs:
  Bug #1797480 in cloud-init: "Azure: Support ephemeral disk handling on Gen2 VMs"
  https://bugs.launchpad.net/cloud-init/+bug/1797480
  Bug #1799338 in cloud-init: "cloud-init won't reformat NTFS ephemeral drive on SLES 15"
  https://bugs.launchpad.net/cloud-init/+bug/1799338
  Bug #1799594 in cloud-init: "Azure - Report ready during preprovisioning as soon as we get the ReprovisionData"
  https://bugs.launchpad.net/cloud-init/+bug/1799594

For more details, see:
https://code.launchpad.net/~chad.smith/cloud-init/+git/cloud-init/+merge/358684
-- 
Your team cloud-init committers is requested to review the proposed merge of ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 2ba6b68..1bc2240 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -8,11 +8,10 @@ import sys
 
 from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
 from cloudinit import log
-from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
 from . import addLogHandlerCLI, read_cfg_paths
 
 NAME = 'render'
-DEFAULT_INSTANCE_DATA = '/run/cloud-init/instance-data.json'
 
 LOG = log.getLogger(NAME)
 
@@ -47,12 +46,22 @@ def handle_args(name, args):
     @return 0 on success, 1 on failure.
     """
     addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
-    if not args.instance_data:
-        paths = read_cfg_paths()
-        instance_data_fn = os.path.join(
-            paths.run_dir, INSTANCE_JSON_FILE)
-    else:
+    if args.instance_data:
         instance_data_fn = args.instance_data
+    else:
+        paths = read_cfg_paths()
+        uid = os.getuid()
+        redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
+        if uid == 0:
+            instance_data_fn = os.path.join(
+                paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+            if not os.path.exists(instance_data_fn):
+                LOG.warning(
+                     'Missing root-readable %s. Using redacted %s instead.',
+                     instance_data_fn, redacted_data_fn)
+                instance_data_fn = redacted_data_fn
+        else:
+            instance_data_fn = redacted_data_fn
     if not os.path.exists(instance_data_fn):
         LOG.error('Missing instance-data.json file: %s', instance_data_fn)
         return 1
@@ -62,10 +71,14 @@ def handle_args(name, args):
     except IOError:
         LOG.error('Missing user-data file: %s', args.user_data)
         return 1
-    rendered_payload = render_jinja_payload_from_file(
-        payload=user_data, payload_fn=args.user_data,
-        instance_data_file=instance_data_fn,
-        debug=True if args.debug else False)
+    try:
+        rendered_payload = render_jinja_payload_from_file(
+            payload=user_data, payload_fn=args.user_data,
+            instance_data_file=instance_data_fn,
+            debug=True if args.debug else False)
+    except RuntimeError as e:
+        LOG.error('Cannot render from instance data: %s', str(e))
+        return 1
     if not rendered_payload:
         LOG.error('Unable to render user-data file: %s', args.user_data)
         return 1
diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py
index fc5d2c0..988bba0 100644
--- a/cloudinit/cmd/devel/tests/test_render.py
+++ b/cloudinit/cmd/devel/tests/test_render.py
@@ -6,7 +6,7 @@ import os
 from collections import namedtuple
 from cloudinit.cmd.devel import render
 from cloudinit.helpers import Paths
-from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
 from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja
 from cloudinit.util import ensure_dir, write_file
 
@@ -63,6 +63,49 @@ class TestRender(CiTestCase):
             'Missing instance-data.json file: %s' % json_file,
             self.logs.getvalue())
 
+    def test_handle_args_root_fallback_from_sensitive_instance_data(self):
+        """When root user defaults to sensitive.json."""
+        user_data = self.tmp_path('user-data', dir=self.tmp)
+        run_dir = self.tmp_path('run_dir', dir=self.tmp)
+        ensure_dir(run_dir)
+        paths = Paths({'run_dir': run_dir})
+        self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
+        self.m_paths.return_value = paths
+        args = self.args(
+            user_data=user_data, instance_data=None, debug=False)
+        with mock.patch('sys.stderr', new_callable=StringIO):
+            with mock.patch('os.getuid') as m_getuid:
+                m_getuid.return_value = 0
+                self.assertEqual(1, render.handle_args('anyname', args))
+        json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+        json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+        self.assertIn(
+            'WARNING: Missing root-readable %s. Using redacted %s' % (
+                json_sensitive, json_file), self.logs.getvalue())
+        self.assertIn(
+            'ERROR: Missing instance-data.json file: %s' % json_file,
+            self.logs.getvalue())
+
+    def test_handle_args_root_uses_sensitive_instance_data(self):
+        """When root user, and no instance-data arg, use sensitive.json."""
+        user_data = self.tmp_path('user-data', dir=self.tmp)
+        write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
+        run_dir = self.tmp_path('run_dir', dir=self.tmp)
+        ensure_dir(run_dir)
+        json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+        write_file(json_sensitive, '{"my-var": "jinja worked"}')
+        paths = Paths({'run_dir': run_dir})
+        self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
+        self.m_paths.return_value = paths
+        args = self.args(
+            user_data=user_data, instance_data=None, debug=False)
+        with mock.patch('sys.stderr', new_callable=StringIO):
+            with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+                with mock.patch('os.getuid') as m_getuid:
+                    m_getuid.return_value = 0
+                    self.assertEqual(0, render.handle_args('anyname', args))
+        self.assertIn('rendering: jinja worked', m_stdout.getvalue())
+
     @skipUnlessJinja()
     def test_handle_args_renders_instance_data_vars_in_template(self):
         """If user_data file is a jinja template render instance-data vars."""
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index 7d2d4fe..1d888b9 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -3,6 +3,7 @@
 """Query standardized instance metadata from the command line."""
 
 import argparse
+from errno import EACCES
 import os
 import six
 import sys
@@ -79,27 +80,38 @@ def handle_args(name, args):
     uid = os.getuid()
     if not all([args.instance_data, args.user_data, args.vendor_data]):
         paths = read_cfg_paths()
-    if not args.instance_data:
+    if args.instance_data:
+        instance_data_fn = args.instance_data
+    else:
+        redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
         if uid == 0:
-            default_json_fn = INSTANCE_JSON_SENSITIVE_FILE
+            sensitive_data_fn = os.path.join(
+                paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+            if os.path.exists(sensitive_data_fn):
+                instance_data_fn = sensitive_data_fn
+            else:
+                LOG.warning(
+                     'Missing root-readable %s. Using redacted %s instead.',
+                     sensitive_data_fn, redacted_data_fn)
+                instance_data_fn = redacted_data_fn
         else:
-            default_json_fn = INSTANCE_JSON_FILE  # World readable
-        instance_data_fn = os.path.join(paths.run_dir, default_json_fn)
+            instance_data_fn = redacted_data_fn
+    if args.user_data:
+        user_data_fn = args.user_data
     else:
-        instance_data_fn = args.instance_data
-    if not args.user_data:
         user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
+    if args.vendor_data:
+        vendor_data_fn = args.vendor_data
     else:
-        user_data_fn = args.user_data
-    if not args.vendor_data:
         vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
-    else:
-        vendor_data_fn = args.vendor_data
 
     try:
         instance_json = util.load_file(instance_data_fn)
-    except IOError:
-        LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+    except (IOError, OSError) as e:
+        if e.errno == EACCES:
+            LOG.error("No read permission on '%s'. Try sudo", instance_data_fn)
+        else:
+            LOG.error('Missing instance-data file: %s', instance_data_fn)
         return 1
 
     instance_data = util.load_json(instance_json)
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
index fb87c6a..28738b1 100644
--- a/cloudinit/cmd/tests/test_query.py
+++ b/cloudinit/cmd/tests/test_query.py
@@ -1,5 +1,6 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
+import errno
 from six import StringIO
 from textwrap import dedent
 import os
@@ -7,7 +8,8 @@ import os
 from collections import namedtuple
 from cloudinit.cmd import query
 from cloudinit.helpers import Paths
-from cloudinit.sources import REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE
+from cloudinit.sources import (
+    REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE)
 from cloudinit.tests.helpers import CiTestCase, mock
 from cloudinit.util import ensure_dir, write_file
 
@@ -50,10 +52,28 @@ class TestQuery(CiTestCase):
         with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
             self.assertEqual(1, query.handle_args('anyname', args))
         self.assertIn(
-            'ERROR: Missing instance-data.json file: %s' % absent_fn,
+            'ERROR: Missing instance-data file: %s' % absent_fn,
             self.logs.getvalue())
         self.assertIn(
-            'ERROR: Missing instance-data.json file: %s' % absent_fn,
+            'ERROR: Missing instance-data file: %s' % absent_fn,
+            m_stderr.getvalue())
+
+    def test_handle_args_error_when_no_read_permission_instance_data(self):
+        """When instance_data file is unreadable, log an error."""
+        noread_fn = self.tmp_path('unreadable', dir=self.tmp)
+        write_file(noread_fn, 'thou shall not pass')
+        args = self.args(
+            debug=False, dump_all=True, format=None, instance_data=noread_fn,
+            list_keys=False, user_data='ud', vendor_data='vd', varname=None)
+        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+            with mock.patch('cloudinit.cmd.query.util.load_file') as m_load:
+                m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
+                self.assertEqual(1, query.handle_args('anyname', args))
+        self.assertIn(
+            "ERROR: No read permission on '%s'. Try sudo" % noread_fn,
+            self.logs.getvalue())
+        self.assertIn(
+            "ERROR: No read permission on '%s'. Try sudo" % noread_fn,
             m_stderr.getvalue())
 
     def test_handle_args_defaults_instance_data(self):
@@ -70,12 +90,58 @@ class TestQuery(CiTestCase):
             self.assertEqual(1, query.handle_args('anyname', args))
         json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
         self.assertIn(
-            'ERROR: Missing instance-data.json file: %s' % json_file,
+            'ERROR: Missing instance-data file: %s' % json_file,
             self.logs.getvalue())
         self.assertIn(
-            'ERROR: Missing instance-data.json file: %s' % json_file,
+            'ERROR: Missing instance-data file: %s' % json_file,
             m_stderr.getvalue())
 
+    def test_handle_args_root_fallsback_to_instance_data(self):
+        """When no instance_data argument, root falls back to redacted json."""
+        args = self.args(
+            debug=False, dump_all=True, format=None, instance_data=None,
+            list_keys=False, user_data=None, vendor_data=None, varname=None)
+        run_dir = self.tmp_path('run_dir', dir=self.tmp)
+        ensure_dir(run_dir)
+        paths = Paths({'run_dir': run_dir})
+        self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
+        self.m_paths.return_value = paths
+        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+            with mock.patch('os.getuid') as m_getuid:
+                m_getuid.return_value = 0
+                self.assertEqual(1, query.handle_args('anyname', args))
+        json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+        sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+        self.assertIn(
+            'WARNING: Missing root-readable %s. Using redacted %s instead.' % (
+                sensitive_file, json_file),
+            m_stderr.getvalue())
+
+    def test_handle_args_root_uses_instance_sensitive_data(self):
+        """When no instance_data argument, root uses semsitive json."""
+        user_data = self.tmp_path('user-data', dir=self.tmp)
+        vendor_data = self.tmp_path('vendor-data', dir=self.tmp)
+        write_file(user_data, 'ud')
+        write_file(vendor_data, 'vd')
+        run_dir = self.tmp_path('run_dir', dir=self.tmp)
+        sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+        write_file(sensitive_file, '{"my-var": "it worked"}')
+        ensure_dir(run_dir)
+        paths = Paths({'run_dir': run_dir})
+        self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
+        self.m_paths.return_value = paths
+        args = self.args(
+            debug=False, dump_all=True, format=None, instance_data=None,
+            list_keys=False, user_data=vendor_data, vendor_data=vendor_data,
+            varname=None)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            with mock.patch('os.getuid') as m_getuid:
+                m_getuid.return_value = 0
+                self.assertEqual(0, query.handle_args('anyname', args))
+        self.assertEqual(
+            '{\n "my_var": "it worked",\n "userdata": "vd",\n '
+            '"vendordata": "vd"\n}\n', m_stdout.getvalue())
+
     def test_handle_args_dumps_all_instance_data(self):
         """When --all is specified query will dump all instance data vars."""
         write_file(self.instance_data, '{"my-var": "it worked"}')
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 943089e..29e192e 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -743,7 +743,7 @@ def assert_and_settle_device(device):
         util.udevadm_settle()
         if not os.path.exists(device):
             raise RuntimeError("Device %s did not exist and was not created "
-                               "with a udevamd settle." % device)
+                               "with a udevadm settle." % device)
 
     # Whether or not the device existed above, it is possible that udev
     # events that would populate udev database (for reading by lsdname) have
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index 3fa4097..ce3accf 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -1,5 +1,6 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
+from errno import EACCES
 import os
 import re
 
@@ -76,7 +77,14 @@ def render_jinja_payload_from_file(
         raise RuntimeError(
             'Cannot render jinja template vars. Instance data not yet'
             ' present at %s' % instance_data_file)
-    instance_data = load_json(load_file(instance_data_file))
+    try:
+        instance_data = load_json(load_file(instance_data_file))
+    except (IOError, OSError) as e:
+        if e.errno == EACCES:
+            raise RuntimeError(
+                'Cannot render jinja template vars. No read permission on'
+                " '%s'. Try sudo" % instance_data_file)
+
     rendered_payload = render_jinja_payload(
         payload, payload_fn, instance_data, debug)
     if not rendered_payload:
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 12cf509..bdc5799 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -40,34 +40,56 @@ class EphemeralDHCPv4(object):
     def __init__(self, iface=None):
         self.iface = iface
         self._ephipv4 = None
+        self.lease = None
 
     def __enter__(self):
+        """Setup sandboxed dhcp context."""
+        return self.obtain_lease()
+
+    def __exit__(self, excp_type, excp_value, excp_traceback):
+        """Teardown sandboxed dhcp context."""
+        self.clean_network()
+
+    def clean_network(self):
+        """Exit _ephipv4 context to teardown of ip configuration performed."""
+        if self.lease:
+            self.lease = None
+        if not self._ephipv4:
+            return
+        self._ephipv4.__exit__(None, None, None)
+
+    def obtain_lease(self):
+        """Perform dhcp discovery in a sandboxed environment if possible.
+
+        @return: A dict representing dhcp options on the most recent lease
+            obtained from the dhclient discovery if run, otherwise an error
+            is raised.
+
+        @raises: NoDHCPLeaseError if no leases could be obtained.
+        """
+        if self.lease:
+            return self.lease
         try:
             leases = maybe_perform_dhcp_discovery(self.iface)
         except InvalidDHCPLeaseFileError:
             raise NoDHCPLeaseError()
         if not leases:
             raise NoDHCPLeaseError()
-        lease = leases[-1]
+        self.lease = leases[-1]
         LOG.debug("Received dhcp lease on %s for %s/%s",
-                  lease['interface'], lease['fixed-address'],
-                  lease['subnet-mask'])
+                  self.lease['interface'], self.lease['fixed-address'],
+                  self.lease['subnet-mask'])
         nmap = {'interface': 'interface', 'ip': 'fixed-address',
                 'prefix_or_mask': 'subnet-mask',
                 'broadcast': 'broadcast-address',
                 'router': 'routers'}
-        kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()])
+        kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()])
         if not kwargs['broadcast']:
             kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
         ephipv4 = EphemeralIPv4Network(**kwargs)
         ephipv4.__enter__()
         self._ephipv4 = ephipv4
-        return lease
-
-    def __exit__(self, excp_type, excp_value, excp_traceback):
-        if not self._ephipv4:
-            return
-        self._ephipv4.__exit__(excp_type, excp_value, excp_traceback)
+        return self.lease
 
 
 def maybe_perform_dhcp_discovery(nic=None):
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 39391d0..9e8a1a8 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -22,7 +22,7 @@ from cloudinit.event import EventType
 from cloudinit.net.dhcp import EphemeralDHCPv4
 from cloudinit import sources
 from cloudinit.sources.helpers.azure import get_metadata_from_fabric
-from cloudinit.url_helper import readurl, UrlError
+from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
 from cloudinit import util
 
 LOG = logging.getLogger(__name__)
@@ -57,7 +57,7 @@ IMDS_URL = "http://169.254.169.254/metadata/";
 # List of static scripts and network config artifacts created by
 # stock ubuntu suported images.
 UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
-    '/etc/netplan/90-azure-hotplug.yaml',
+    '/etc/netplan/90-hotplug-azure.yaml',
     '/usr/local/sbin/ephemeral_eth.sh',
     '/etc/udev/rules.d/10-net-device-added.rules',
     '/run/network/interfaces.ephemeral.d',
@@ -207,7 +207,9 @@ BUILTIN_DS_CONFIG = {
     },
     'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
     'dhclient_lease_file': LEASE_FILE,
+    'apply_network_config': True,  # Use IMDS published network configuration
 }
+# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
 
 BUILTIN_CLOUD_CONFIG = {
     'disk_setup': {
@@ -278,6 +280,7 @@ class DataSourceAzure(sources.DataSource):
         self._network_config = None
         # Regenerate network config new_instance boot and every boot
         self.update_events['network'].add(EventType.BOOT)
+        self._ephemeral_dhcp_ctx = None
 
     def __str__(self):
         root = sources.DataSource.__str__(self)
@@ -404,7 +407,8 @@ class DataSourceAzure(sources.DataSource):
                 LOG.warning("%s was not mountable", cdev)
                 continue
 
-            if reprovision or self._should_reprovision(ret):
+            perform_reprovision = reprovision or self._should_reprovision(ret)
+            if perform_reprovision:
                 ret = self._reprovision()
             imds_md = get_metadata_from_imds(
                 self.fallback_interface, retries=3)
@@ -432,6 +436,18 @@ class DataSourceAzure(sources.DataSource):
             crawled_data['metadata']['random_seed'] = seed
         crawled_data['metadata']['instance-id'] = util.read_dmi_data(
             'system-uuid')
+
+        if perform_reprovision:
+            LOG.info("Reporting ready to Azure after getting ReprovisionData")
+            use_cached_ephemeral = (net.is_up(self.fallback_interface) and
+                                    getattr(self, '_ephemeral_dhcp_ctx', None))
+            if use_cached_ephemeral:
+                self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
+                self._ephemeral_dhcp_ctx.clean_network()  # Teardown ephemeral
+            else:
+                with EphemeralDHCPv4() as lease:
+                    self._report_ready(lease=lease)
+
         return crawled_data
 
     def _is_platform_viable(self):
@@ -458,7 +474,8 @@ class DataSourceAzure(sources.DataSource):
         except sources.InvalidMetaDataException as e:
             LOG.warning('Could not crawl Azure metadata: %s', e)
             return False
-        if self.distro and self.distro.name == 'ubuntu':
+        if (self.distro and self.distro.name == 'ubuntu' and
+                self.ds_cfg.get('apply_network_config')):
             maybe_remove_ubuntu_network_config_scripts()
 
         # Process crawled data and augment with various config defaults
@@ -509,32 +526,29 @@ class DataSourceAzure(sources.DataSource):
         report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
         LOG.debug("Start polling IMDS")
 
-        def exc_cb(msg, exception):
-            if isinstance(exception, UrlError) and exception.code == 404:
-                return True
-            # If we get an exception while trying to call IMDS, we
-            # call DHCP and setup the ephemeral network to acquire the new IP.
-            return False
-
         while True:
             try:
-                with EphemeralDHCPv4() as lease:
-                    if report_ready:
-                        path = REPORTED_READY_MARKER_FILE
-                        LOG.info(
-                            "Creating a marker file to report ready: %s", path)
-                        util.write_file(path, "{pid}: {time}\n".format(
-                            pid=os.getpid(), time=time()))
-                        self._report_ready(lease=lease)
-                        report_ready = False
-                    return readurl(url, timeout=1, headers=headers,
-                                   exception_cb=exc_cb, infinite=True).contents
+                # Save our EphemeralDHCPv4 context so we avoid repeated dhcp
+                self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
+                lease = self._ephemeral_dhcp_ctx.obtain_lease()
+                if report_ready:
+                    path = REPORTED_READY_MARKER_FILE
+                    LOG.info(
+                        "Creating a marker file to report ready: %s", path)
+                    util.write_file(path, "{pid}: {time}\n".format(
+                        pid=os.getpid(), time=time()))
+                    self._report_ready(lease=lease)
+                    report_ready = False
+                return readurl(url, timeout=1, headers=headers,
+                               exception_cb=retry_on_url_exc, infinite=True,
+                               log_req_resp=False).contents
             except UrlError:
+                # Teardown our EphemeralDHCPv4 context on failure as we retry
+                self._ephemeral_dhcp_ctx.clean_network()
                 pass
 
     def _report_ready(self, lease):
-        """Tells the fabric provisioning has completed
-           before we go into our polling loop."""
+        """Tells the fabric provisioning has completed """
         try:
             get_metadata_from_fabric(None, lease['unknown-245'])
         except Exception:
@@ -619,7 +633,11 @@ class DataSourceAzure(sources.DataSource):
               the blacklisted devices.
         """
         if not self._network_config:
-            self._network_config = parse_network_config(self._metadata_imds)
+            if self.ds_cfg.get('apply_network_config'):
+                nc_src = self._metadata_imds
+            else:
+                nc_src = None
+            self._network_config = parse_network_config(nc_src)
         return self._network_config
 
 
@@ -700,7 +718,7 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
         file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
                                    update_env_for_mount={'LANG': 'C'})
     except util.MountFailedError as e:
-        if "mount: unknown filesystem type 'ntfs'" in str(e):
+        if "unknown filesystem type 'ntfs'" in str(e):
             return True, (bmsg + ' but this system cannot mount NTFS,'
                           ' assuming there are no important files.'
                           ' Formatting allowed.')
@@ -1162,17 +1180,12 @@ def get_metadata_from_imds(fallback_nic, retries):
 
 def _get_metadata_from_imds(retries):
 
-    def retry_on_url_error(msg, exception):
-        if isinstance(exception, UrlError) and exception.code == 404:
-            return True  # Continue retries
-        return False  # Stop retries on all other exceptions
-
     url = IMDS_URL + "instance?api-version=2017-12-01"
     headers = {"Metadata": "true"}
     try:
         response = readurl(
             url, timeout=1, headers=headers, retries=retries,
-            exception_cb=retry_on_url_error)
+            exception_cb=retry_on_url_exc)
     except Exception as e:
         LOG.debug('Ignoring IMDS instance metadata: %s', e)
         return {}
@@ -1195,7 +1208,7 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
     additional interfaces which get attached by a customer at some point
     after initial boot. Since the Azure datasource can now regenerate
     network configuration as metadata reports these new devices, we no longer
-    want the udev rules or netplan's 90-azure-hotplug.yaml to configure
+    want the udev rules or netplan's 90-hotplug-azure.yaml to configure
     networking on eth1 or greater as it might collide with cloud-init's
     configuration.
 
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index 113249d..aa9f3ec 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -1,10 +1,12 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
-from cloudinit.url_helper import oauth_headers, read_file_or_url
+from cloudinit.url_helper import (
+    NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc)
 from cloudinit.tests.helpers import CiTestCase, mock, skipIf
 from cloudinit import util
 
 import httpretty
+import requests
 
 
 try:
@@ -64,3 +66,24 @@ class TestReadFileOrUrl(CiTestCase):
         result = read_file_or_url(url)
         self.assertEqual(result.contents, data)
         self.assertEqual(str(result), data.decode('utf-8'))
+
+
+class TestRetryOnUrlExc(CiTestCase):
+
+    def test_do_not_retry_non_urlerror(self):
+        """When exception is not UrlError return False."""
+        myerror = IOError('something unexcpected')
+        self.assertFalse(retry_on_url_exc(msg='', exc=myerror))
+
+    def test_perform_retries_on_not_found(self):
+        """When exception is UrlError with a 404 status code return True."""
+        myerror = UrlError(cause=RuntimeError(
+            'something was not found'), code=NOT_FOUND)
+        self.assertTrue(retry_on_url_exc(msg='', exc=myerror))
+
+    def test_perform_retries_on_timeout(self):
+        """When exception is a requests.Timout return True."""
+        myerror = UrlError(cause=requests.Timeout('something timed out'))
+        self.assertTrue(retry_on_url_exc(msg='', exc=myerror))
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 749a384..e3d2dba 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -18,25 +18,51 @@ MOUNT_INFO = [
 ]
 
 OS_RELEASE_SLES = dedent("""\
-    NAME="SLES"\n
-    VERSION="12-SP3"\n
-    VERSION_ID="12.3"\n
-    PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n
-    ID="sles"\nANSI_COLOR="0;32"\n
-    CPE_NAME="cpe:/o:suse:sles:12:sp3"\n
+    NAME="SLES"
+    VERSION="12-SP3"
+    VERSION_ID="12.3"
+    PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"
+    ID="sles"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:suse:sles:12:sp3"
 """)
 
 OS_RELEASE_OPENSUSE = dedent("""\
-NAME="openSUSE Leap"
-VERSION="42.3"
-ID=opensuse
-ID_LIKE="suse"
-VERSION_ID="42.3"
-PRETTY_NAME="openSUSE Leap 42.3"
-ANSI_COLOR="0;32"
-CPE_NAME="cpe:/o:opensuse:leap:42.3"
-BUG_REPORT_URL="https://bugs.opensuse.org";
-HOME_URL="https://www.opensuse.org/";
+    NAME="openSUSE Leap"
+    VERSION="42.3"
+    ID=opensuse
+    ID_LIKE="suse"
+    VERSION_ID="42.3"
+    PRETTY_NAME="openSUSE Leap 42.3"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:leap:42.3"
+    BUG_REPORT_URL="https://bugs.opensuse.org";
+    HOME_URL="https://www.opensuse.org/";
+""")
+
+OS_RELEASE_OPENSUSE_L15 = dedent("""\
+    NAME="openSUSE Leap"
+    VERSION="15.0"
+    ID="opensuse-leap"
+    ID_LIKE="suse opensuse"
+    VERSION_ID="15.0"
+    PRETTY_NAME="openSUSE Leap 15.0"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:leap:15.0"
+    BUG_REPORT_URL="https://bugs.opensuse.org";
+    HOME_URL="https://www.opensuse.org/";
+""")
+
+OS_RELEASE_OPENSUSE_TW = dedent("""\
+    NAME="openSUSE Tumbleweed"
+    ID="opensuse-tumbleweed"
+    ID_LIKE="opensuse suse"
+    VERSION_ID="20180920"
+    PRETTY_NAME="openSUSE Tumbleweed"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920"
+    BUG_REPORT_URL="https://bugs.opensuse.org";
+    HOME_URL="https://www.opensuse.org/";
 """)
 
 OS_RELEASE_CENTOS = dedent("""\
@@ -447,12 +473,35 @@ class TestGetLinuxDistro(CiTestCase):
 
     @mock.patch('cloudinit.util.load_file')
     def test_get_linux_opensuse(self, m_os_release, m_path_exists):
-        """Verify we get the correct name and machine arch on OpenSUSE."""
+        """Verify we get the correct name and machine arch on openSUSE
+           prior to openSUSE Leap 15.
+        """
         m_os_release.return_value = OS_RELEASE_OPENSUSE
         m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
         dist = util.get_linux_distro()
         self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
 
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on openSUSE
+           for openSUSE Leap 15.0 and later.
+        """
+        m_os_release.return_value = OS_RELEASE_OPENSUSE_L15
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on openSUSE
+           for openSUSE Tumbleweed
+        """
+        m_os_release.return_value = OS_RELEASE_OPENSUSE_TW
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(
+            ('opensuse-tumbleweed', '20180920', platform.machine()), dist)
+
     @mock.patch('platform.dist')
     def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
         """Verify we get no information if os-release does not exist"""
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 8067979..396d69a 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -199,7 +199,7 @@ def _get_ssl_args(url, ssl_details):
 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
             check_status=True, allow_redirects=True, exception_cb=None,
-            session=None, infinite=False):
+            session=None, infinite=False, log_req_resp=True):
     url = _cleanurl(url)
     req_args = {
         'url': url,
@@ -256,9 +256,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
                 continue
             filtered_req_args[k] = v
         try:
-            LOG.debug("[%s/%s] open '%s' with %s configuration", i,
-                      "infinite" if infinite else manual_tries, url,
-                      filtered_req_args)
+
+            if log_req_resp:
+                LOG.debug("[%s/%s] open '%s' with %s configuration", i,
+                          "infinite" if infinite else manual_tries, url,
+                          filtered_req_args)
 
             if session is None:
                 session = requests.Session()
@@ -294,8 +296,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
                 break
             if (infinite and sec_between > 0) or \
                (i + 1 < manual_tries and sec_between > 0):
-                LOG.debug("Please wait %s seconds while we wait to try again",
-                          sec_between)
+
+                if log_req_resp:
+                    LOG.debug(
+                        "Please wait %s seconds while we wait to try again",
+                        sec_between)
                 time.sleep(sec_between)
     if excps:
         raise excps[-1]
@@ -549,4 +554,18 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
     _uri, signed_headers, _body = client.sign(url)
     return signed_headers
 
+
+def retry_on_url_exc(msg, exc):
+    """readurl exception_cb that will retry on NOT_FOUND and Timeout.
+
+    Returns False to raise the exception from readurl, True to retry.
+    """
+    if not isinstance(exc, UrlError):
+        return False
+    if exc.code == NOT_FOUND:
+        return True
+    if exc.cause and isinstance(exc.cause, requests.Timeout):
+        return True
+    return False
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/util.py b/cloudinit/util.py
index c67d6be..7800f7b 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -615,8 +615,8 @@ def get_linux_distro():
         distro_name = os_release.get('ID', '')
         distro_version = os_release.get('VERSION_ID', '')
         if 'sles' in distro_name or 'suse' in distro_name:
-            # RELEASE_BLOCKER: We will drop this sles ivergent behavior in
-            # before 18.4 so that get_linux_distro returns a named tuple
+            # RELEASE_BLOCKER: We will drop this sles divergent behavior in
+            # the future so that get_linux_distro returns a named tuple
             # which will include both version codename and architecture
             # on all distributions.
             flavor = platform.machine()
@@ -668,7 +668,8 @@ def system_info():
             var = 'ubuntu'
         elif linux_dist == 'redhat':
             var = 'rhel'
-        elif linux_dist in ('opensuse', 'sles'):
+        elif linux_dist in (
+                'opensuse', 'opensuse-tumbleweed', 'opensuse-leap', 'sles'):
             var = 'suse'
         else:
             var = 'linux'
diff --git a/debian/changelog b/debian/changelog
index 117fd16..9c0b65f 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,31 @@
+cloud-init (18.4-22-g6062595b-0ubuntu1) disco; urgency=medium
+
+  * New upstream snapshot.
+    - azure: retry imds polling on requests.Timeout (LP: #1800223)
+    - azure: Accept variation in error msg from mount for ntfs volumes
+      [Jason Zions] (LP: #1799338)
+    - azure: fix regression introduced when persisting ephemeral dhcp lease
+      [Aswin Rajamannar]
+    - azure: add udev rules to create cloud-init Gen2 disk name symlinks
+      (LP: #1797480)
+    - tests: ec2 mock missing httpretty user-data and instance-identity routes
+    - azure: remove /etc/netplan/90-hotplug-azure.yaml when net from IMDS
+    - azure: report ready to fabric after reprovision and reduce logging
+      [Aswin Rajamannar] (LP: #1799594)
+    - query: better error when missing read permission on instance-data
+    - instance-data: fallback to instance-data.json if sensitive is absent.
+      (LP: #1798189)
+    - docs: remove colon from network v1 config example. [Tomer Cohen]
+    - Add cloud-id binary to packages for SUSE [Jason Zions]
+    - systemd: On SUSE ensure cloud-init.service runs before wicked
+      [Robert Schweikert] (LP: #1799709)
+    - update detection of openSUSE variants [Robert Schweikert]
+    - azure: Add apply_network_config option to disable network from IMDS
+      (LP: #1798424)
+    - Correct spelling in an error message (udevadm). [Katie McLaughlin]
+
+ -- Chad Smith <chad.smith@xxxxxxxxxxxxx>  Mon, 12 Nov 2018 20:33:12 -0700
+
 cloud-init (18.4-7-g4652b196-0ubuntu1) cosmic; urgency=medium
 
   * New upstream snapshot.
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 559011e..f73c369 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -57,6 +57,52 @@ in order to use waagent.conf with cloud-init, the following settings are recomme
    ResourceDisk.MountPoint=/mnt
 
 
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
+
+The settings that may be configured are:
+
+ * **agent_command**: Either __builtin__ (default) or a command to run to get
+   metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the
+   provided command to obtain metadata.
+ * **apply_network_config**: Boolean set to True to use network configuration
+   described by Azure's IMDS endpoint instead of fallback network config of
+   dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False.
+ * **data_dir**: Path used to read metadata files and write crawled data.
+ * **dhclient_lease_file**: The fallback lease file to source when looking for
+   custom DHCP option 245 from Azure fabric.
+ * **disk_aliases**: A dictionary defining which device paths should be
+   interpreted as ephemeral images. See cc_disk_setup module for more info.
+ * **hostname_bounce**: A dictionary describing Azure hostname bounce behavior
+   used to react to metadata changes. Azure will throttle ifup/down in some
+   cases after metadata has been updated to inform the dhcp server about
+   updated hostnames. See the example configuration below for the keys this
+   dictionary supports.
+ * **set_hostname**: Boolean set to True when we want Azure to set the hostname
+   based on metadata.
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+  datasource:
+   Azure:
+    agent_command: __builtin__
+    apply_network_config: true
+    data_dir: /var/lib/waagent
+    dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
+    disk_aliases:
+        ephemeral0: /dev/disk/cloud/azure_resource
+    hostname_bounce:
+        interface: eth0
+        command: builtin
+        policy: true
+        hostname_command: hostname
+    set_hostname: true
+
+
 Userdata
 --------
 Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index a3a6d1e..6b2022b 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -191,6 +191,7 @@ fi
 
 # Program binaries
 %{_bindir}/cloud-init*
+%{_bindir}/cloud-id*
 
 # Docs
 %doc LICENSE ChangeLog TODO.rst requirements.txt
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index e781d74..26894b3 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -93,6 +93,7 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f)
 
 # Program binaries
 %{_bindir}/cloud-init*
+%{_bindir}/cloud-id*
 
 # systemd files
 /usr/lib/systemd/system-generators/*
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index b92e8ab..5cb0037 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -14,8 +14,7 @@ After=networking.service
 After=network.service
 {% endif %}
 {% if variant in ["suse"] %}
-Requires=wicked.service
-After=wicked.service
+Before=wicked.service
 # setting hostname via hostnamectl depends on dbus, which otherwise
 # would not be guaranteed at this point.
 After=dbus.service
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index abe820e..b92ffc7 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -3,6 +3,7 @@
 """Tests of the built-in user data handlers."""
 
 import copy
+import errno
 import os
 import shutil
 import tempfile
@@ -202,6 +203,30 @@ class TestJinjaTemplatePartHandler(CiTestCase):
             os.path.exists(script_file),
             'Unexpected file created %s' % script_file)
 
+    def test_jinja_template_handle_errors_on_unreadable_instance_data(self):
+        """If instance-data is unreadable, raise an error from handle_part."""
+        script_handler = ShellScriptPartHandler(self.paths)
+        instance_json = os.path.join(self.run_dir, 'instance-data.json')
+        util.write_file(instance_json, util.json_dumps({}))
+        h = JinjaTemplatePartHandler(
+            self.paths, sub_handlers=[script_handler])
+        with mock.patch(self.mpath + 'load_file') as m_load:
+            with self.assertRaises(RuntimeError) as context_manager:
+                m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
+                h.handle_part(
+                    data='data', ctype="!" + handlers.CONTENT_START,
+                    filename='part01',
+                    payload='## template: jinja  \n#!/bin/bash\necho himom',
+                    frequency='freq', headers='headers')
+        script_file = os.path.join(script_handler.script_dir, 'part01')
+        self.assertEqual(
+            'Cannot render jinja template vars. No read permission on'
+            " '{rdir}/instance-data.json'. Try sudo".format(rdir=self.run_dir),
+            str(context_manager.exception))
+        self.assertFalse(
+            os.path.exists(script_file),
+            'Unexpected file created %s' % script_file)
+
     @skipUnlessJinja()
     def test_jinja_template_handle_renders_jinja_content(self):
         """When present, render jinja variables from instance-data.json."""
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 0f4b7bf..56484b2 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -17,6 +17,7 @@ import crypt
 import httpretty
 import json
 import os
+import requests
 import stat
 import xml.etree.ElementTree as ET
 import yaml
@@ -184,6 +185,35 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
             "Crawl of Azure Instance Metadata Service (IMDS) took",  # log_time
             self.logs.getvalue())
 
+    @mock.patch('requests.Session.request')
+    @mock.patch('cloudinit.url_helper.time.sleep')
+    @mock.patch(MOCKPATH + 'net.is_up')
+    def test_get_metadata_from_imds_retries_on_timeout(
+            self, m_net_is_up, m_sleep, m_request):
+        """Retry IMDS network metadata on timeout errors."""
+
+        self.attempt = 0
+        m_request.side_effect = requests.Timeout('Fake Connection Timeout')
+
+        def retry_callback(request, uri, headers):
+            self.attempt += 1
+            raise requests.Timeout('Fake connection timeout')
+
+        httpretty.register_uri(
+            httpretty.GET,
+            dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
+            body=retry_callback)
+
+        m_net_is_up.return_value = True  # skips dhcp
+
+        self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3))
+
+        m_net_is_up.assert_called_with('eth9')
+        self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list)
+        self.assertIn(
+            "Crawl of Azure Instance Metadata Service (IMDS) took",  # log_time
+            self.logs.getvalue())
+
 
 class TestAzureDataSource(CiTestCase):
 
@@ -256,7 +286,8 @@ scbus-1 on xpt0 bus 0
         ])
         return dsaz
 
-    def _get_ds(self, data, agent_command=None, distro=None):
+    def _get_ds(self, data, agent_command=None, distro=None,
+                apply_network=None):
 
         def dsdevs():
             return data.get('dsdevs', [])
@@ -312,6 +343,8 @@ scbus-1 on xpt0 bus 0
             data.get('sys_cfg', {}), distro=distro, paths=self.paths)
         if agent_command is not None:
             dsrc.ds_cfg['agent_command'] = agent_command
+        if apply_network is not None:
+            dsrc.ds_cfg['apply_network_config'] = apply_network
 
         return dsrc
 
@@ -434,14 +467,26 @@ fdescfs            /dev/fd          fdescfs rw              0 0
 
     def test_get_data_on_ubuntu_will_remove_network_scripts(self):
         """get_data will remove ubuntu net scripts on Ubuntu distro."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
         odata = {'HostName': "myhost", 'UserName': "myuser"}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata),
-                'sys_cfg': {}}
+                'sys_cfg': sys_cfg}
 
         dsrc = self._get_ds(data, distro='ubuntu')
         dsrc.get_data()
         self.m_remove_ubuntu_network_scripts.assert_called_once_with()
 
+    def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
+        """When apply_network_config false, do not remove scripts on Ubuntu."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+
+        dsrc = self._get_ds(data, distro='ubuntu')
+        dsrc.get_data()
+        self.m_remove_ubuntu_network_scripts.assert_not_called()
+
     def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
         """Return all structured metadata and cache no class attributes."""
         yaml_cfg = "{agent_command: my_command}\n"
@@ -498,6 +543,58 @@ fdescfs            /dev/fd          fdescfs rw              0 0
             dsrc.crawl_metadata()
         self.assertEqual(str(cm.exception), error_msg)
 
+    @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    def test_crawl_metadata_on_reprovision_reports_ready(
+                            self, poll_imds_func,
+                            report_ready_func,
+                            m_write, m_dhcp):
+        """If reprovisioning, report ready at the end"""
+        ovfenv = construct_valid_ovf_env(
+                            platform_settings={"PreprovisionedVm": "True"})
+
+        data = {'ovfcontent': ovfenv,
+                'sys_cfg': {}}
+        dsrc = self._get_ds(data)
+        poll_imds_func.return_value = ovfenv
+        dsrc.crawl_metadata()
+        self.assertEqual(1, report_ready_func.call_count)
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+    @mock.patch('cloudinit.sources.DataSourceAzure.readurl')
+    def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
+                            self, m_readurl, m_dhcp,
+                            m_net, report_ready_func,
+                            m_write):
+        """If reprovisioning, report ready using the obtained lease"""
+        ovfenv = construct_valid_ovf_env(
+                            platform_settings={"PreprovisionedVm": "True"})
+
+        data = {'ovfcontent': ovfenv,
+                'sys_cfg': {}}
+        dsrc = self._get_ds(data)
+
+        lease = {
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}
+        m_dhcp.return_value = [lease]
+
+        reprovision_ovfenv = construct_valid_ovf_env()
+        m_readurl.return_value = url_helper.StringResponse(
+            reprovision_ovfenv.encode('utf-8'))
+
+        dsrc.crawl_metadata()
+        self.assertEqual(2, report_ready_func.call_count)
+        report_ready_func.assert_called_with(lease=lease)
+
     def test_waagent_d_has_0700_perms(self):
         # we expect /var/lib/waagent to be created 0700
         dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
@@ -523,8 +620,10 @@ fdescfs            /dev/fd          fdescfs rw              0 0
 
     def test_network_config_set_from_imds(self):
         """Datasource.network_config returns IMDS network data."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
         odata = {}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
         expected_network_config = {
             'ethernets': {
                 'eth0': {'set-name': 'eth0',
@@ -803,9 +902,10 @@ fdescfs            /dev/fd          fdescfs rw              0 0
     @mock.patch('cloudinit.net.generate_fallback_config')
     def test_imds_network_config(self, mock_fallback):
         """Network config is generated from IMDS network data when present."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
         odata = {'HostName': "myhost", 'UserName': "myuser"}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata),
-                'sys_cfg': {}}
+                'sys_cfg': sys_cfg}
 
         dsrc = self._get_ds(data)
         ret = dsrc.get_data()
@@ -825,6 +925,36 @@ fdescfs            /dev/fd          fdescfs rw              0 0
     @mock.patch('cloudinit.net.get_devicelist')
     @mock.patch('cloudinit.net.device_driver')
     @mock.patch('cloudinit.net.generate_fallback_config')
+    def test_imds_network_ignored_when_apply_network_config_false(
+            self, mock_fallback, mock_dd, mock_devlist, mock_get_mac):
+        """When apply_network_config is False, use fallback instead of IMDS."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        fallback_config = {
+            'version': 1,
+            'config': [{
+                'type': 'physical', 'name': 'eth0',
+                'mac_address': '00:11:22:33:44:55',
+                'params': {'driver': 'hv_netsvc'},
+                'subnets': [{'type': 'dhcp'}],
+            }]
+        }
+        mock_fallback.return_value = fallback_config
+
+        mock_devlist.return_value = ['eth0']
+        mock_dd.return_value = ['hv_netsvc']
+        mock_get_mac.return_value = '00:11:22:33:44:55'
+
+        dsrc = self._get_ds(data)
+        self.assertTrue(dsrc.get_data())
+        self.assertEqual(dsrc.network_config, fallback_config)
+
+    @mock.patch('cloudinit.net.get_interface_mac')
+    @mock.patch('cloudinit.net.get_devicelist')
+    @mock.patch('cloudinit.net.device_driver')
+    @mock.patch('cloudinit.net.generate_fallback_config')
     def test_fallback_network_config(self, mock_fallback, mock_dd,
                                      mock_devlist, mock_get_mac):
         """On absent IMDS network data, generate network fallback config."""
@@ -1411,21 +1541,20 @@ class TestCanDevBeReformatted(CiTestCase):
                     '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
                 }}})
 
-        err = ("Unexpected error while running command.\n",
-               "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ",
-               "'/dev/sda1', '/fake-tmp/dir']\n"
-               "Exit code: 32\n"
-               "Reason: -\n"
-               "Stdout: -\n"
-               "Stderr: mount: unknown filesystem type 'ntfs'")
-        self.m_mount_cb.side_effect = MountFailedError(
-            'Failed mounting %s to %s due to: %s' %
-            ('/dev/sda', '/fake-tmp/dir', err))
-
-        value, msg = dsaz.can_dev_be_reformatted('/dev/sda',
-                                                 preserve_ntfs=False)
-        self.assertTrue(value)
-        self.assertIn('cannot mount NTFS, assuming', msg)
+        error_msgs = [
+            "Stderr: mount: unknown filesystem type 'ntfs'",  # RHEL
+            "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'"  # SLES
+        ]
+
+        for err_msg in error_msgs:
+            self.m_mount_cb.side_effect = MountFailedError(
+                "Failed mounting %s to %s due to: \nUnexpected.\n%s" %
+                ('/dev/sda', '/fake-tmp/dir', err_msg))
+
+            value, msg = dsaz.can_dev_be_reformatted('/dev/sda',
+                                                     preserve_ntfs=False)
+            self.assertTrue(value)
+            self.assertIn('cannot mount NTFS, assuming', msg)
 
     def test_never_destroy_ntfs_config_false(self):
         """Normally formattable situation with never_destroy_ntfs set."""
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index 9f81255..1a5956d 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -211,9 +211,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
         self.metadata_addr = self.datasource.metadata_urls[0]
         self.tmp = self.tmp_dir()
 
-    def data_url(self, version):
+    def data_url(self, version, data_item='meta-data'):
         """Return a metadata url based on the version provided."""
-        return '/'.join([self.metadata_addr, version, 'meta-data', ''])
+        return '/'.join([self.metadata_addr, version, data_item])
 
     def _patch_add_cleanup(self, mpath, *args, **kwargs):
         p = mock.patch(mpath, *args, **kwargs)
@@ -238,10 +238,18 @@ class TestEc2(test_helpers.HttprettyTestCase):
             all_versions = (
                 [ds.min_metadata_version] + ds.extended_metadata_versions)
             for version in all_versions:
-                metadata_url = self.data_url(version)
+                metadata_url = self.data_url(version) + '/'
                 if version == md_version:
                     # Register all metadata for desired version
-                    register_mock_metaserver(metadata_url, md)
+                    register_mock_metaserver(
+                        metadata_url, md.get('md', DEFAULT_METADATA))
+                    userdata_url = self.data_url(
+                        version, data_item='user-data')
+                    register_mock_metaserver(userdata_url, md.get('ud', ''))
+                    identity_url = self.data_url(
+                        version, data_item='dynamic/instance-identity')
+                    register_mock_metaserver(
+                        identity_url, md.get('id', DYNAMIC_METADATA))
                 else:
                     instance_id_url = metadata_url + 'instance-id'
                     if version == ds.min_metadata_version:
@@ -261,7 +269,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         find_fallback_path = (
             'cloudinit.sources.DataSourceEc2.net.find_fallback_nic')
         with mock.patch(find_fallback_path) as m_find_fallback:
@@ -293,7 +301,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         find_fallback_path = (
             'cloudinit.sources.DataSourceEc2.net.find_fallback_nic')
         with mock.patch(find_fallback_path) as m_find_fallback:
@@ -322,7 +330,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         ds._network_config = {'cached': 'data'}
         self.assertEqual({'cached': 'data'}, ds.network_config)
 
@@ -338,7 +346,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
-            md=old_metadata)
+            md={'md': old_metadata})
         self.assertTrue(ds.get_data())
         # Provide new revision of metadata that contains network data
         register_mock_metaserver(
@@ -372,7 +380,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         # Mock 404s on all versions except latest
         all_versions = (
             [ds.min_metadata_version] + ds.extended_metadata_versions)
@@ -399,7 +407,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         ret = ds.get_data()
         self.assertTrue(ret)
         self.assertEqual(0, m_dhcp.call_count)
@@ -412,7 +420,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         ret = ds.get_data()
         self.assertTrue(ret)
 
@@ -422,7 +430,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
             sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         ret = ds.get_data()
         self.assertFalse(ret)
 
@@ -432,7 +440,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
             sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         ret = ds.get_data()
         self.assertTrue(ret)
 
@@ -442,7 +450,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         platform_attrs = [
             attr for attr in ec2.CloudNames.__dict__.keys()
             if not attr.startswith('__')]
@@ -469,7 +477,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
         ret = ds.get_data()
         self.assertFalse(ret)
         self.assertIn(
@@ -499,7 +507,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
         ds = self._setup_ds(
             platform_data=self.valid_platform_data,
             sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
-            md=DEFAULT_METADATA)
+            md={'md': DEFAULT_METADATA})
 
         ret = ds.get_data()
         self.assertTrue(ret)
diff --git a/udev/66-azure-ephemeral.rules b/udev/66-azure-ephemeral.rules
index b9c5c3e..3032f7e 100644
--- a/udev/66-azure-ephemeral.rules
+++ b/udev/66-azure-ephemeral.rules
@@ -4,10 +4,26 @@ SUBSYSTEM!="block", GOTO="cloud_init_end"
 ATTRS{ID_VENDOR}!="Msft", GOTO="cloud_init_end"
 ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="cloud_init_end"
 
-# Root has a GUID of 0000 as the second value
+# Root has a GUID of 0000 as the second value on Gen1 instances
 # The resource/resource has GUID of 0001 as the second value
 ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="azure_root", GOTO="ci_azure_names"
 ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names"
+
+# Azure well known SCSI controllers on Gen2 instances
+ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk"
+# Do not create symlinks for scsi[1-3] or unmatched device_ids
+ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="cloud_init_end"
+ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", GOTO="cloud_init_end"
+ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="cloud_init_end"
+GOTO="cloud_init_end"
+
+# Map scsi#/lun# fabric_name to azure_root|resource on Gen2 instances
+LABEL="azure_datadisk"
+ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result"
+ENV{DEVTYPE}=="disk", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result"
+
+ENV{fabric_name}=="scsi0/lun0", ENV{fabric_name}="azure_root", GOTO="ci_azure_names"
+ENV{fabric_name}=="scsi0/lun1", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names"
 GOTO="cloud_init_end"
 
 # Create the symlinks

Follow ups