
cloud-init-dev team mailing list archive

[Merge] ~raharper/cloud-init:feature/bootspeed-v2 into cloud-init:master

 

Ryan Harper has proposed merging ~raharper/cloud-init:feature/bootspeed-v2 into cloud-init:master.

Commit message:
Changes to enable starting the ssh service very early.

Requested reviews:
  cloud-init commiters (cloud-init-dev)

For more details, see:
https://code.launchpad.net/~raharper/cloud-init/+git/cloud-init/+merge/371908
-- 
Your team cloud-init commiters is requested to review the proposed merge of ~raharper/cloud-init:feature/bootspeed-v2 into cloud-init:master.
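
For context, the branch reorders cloud-config modules when it is safe to do so
and adds an ``ssh_early_start`` knob consumed by a new cc_ssh_host_keys module.
A rough sketch (not part of the diff; only the key names come from the changes
below, the values are illustrative) of how the new keys look once cloud-config
is parsed:

    # Illustrative parsed cloud-config; only the key names are taken from the
    # diff below, the values shown are examples.
    example_cfg = {
        'ssh_early_start': False,        # opt out of the early ssh start
        'ssh_publish_hostkeys': {        # host key publishing controls (cc_ssh)
            'enabled': True,
            'blacklist': ['dsa'],
        },
    }
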
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 511b808..28e4941 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -101,6 +101,10 @@ def event_parent(event):
     return None
 
 
+def event_is_stage(event):
+    return '/' not in event_name(event)
+
+
 def event_timestamp(event):
     return float(event.get('timestamp'))
 
@@ -319,7 +323,9 @@ def generate_records(events, blame_sort=False,
             next_evt = None
 
         if event_type(event) == 'start':
-            if event.get('name') in stages_seen:
+            stage_name = event_parent(event)
+            if stage_name == event_name(event) and stage_name in stages_seen:
+                # new boot record
                 records.append(total_time_record(total_time))
                 boot_records.append(records)
                 records = []
@@ -339,19 +345,26 @@ def generate_records(events, blame_sort=False,
                                                               event,
                                                               next_evt)))
             else:
-                # This is a parent event
-                records.append("Starting stage: %s" % event.get('name'))
-                unprocessed.append(event)
-                stages_seen.append(event.get('name'))
-                continue
+                if event_is_stage(event):
+                    records.append("Starting stage: %s" % event.get('name'))
+                    unprocessed.append(event)
+                    stages_seen.append(event.get('name'))
+                else:
+                    # Start of a substage event
+                    records.append(format_record(print_format,
+                                                 event_record(start_time,
+                                                              event,
+                                                              next_evt)))
+
         else:
             prev_evt = unprocessed.pop()
             if event_name(event) == event_name(prev_evt):
-                record = event_record(start_time, prev_evt, event)
-                records.append(format_record("Finished stage: "
-                                             "(%n) %d seconds ",
-                                             record) + "\n")
-                total_time += record.get('delta')
+                if event_is_stage(event):
+                    record = event_record(start_time, prev_evt, event)
+                    records.append(format_record("Finished stage: "
+                                                 "(%n) %d seconds ",
+                                                 record) + "\n")
+                    total_time += record.get('delta')
             else:
                 # not a match, put it back
                 unprocessed.append(prev_evt)
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index a5446da..789ca6e 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -320,7 +320,7 @@ def main_init(name, args):
 
     # Stage 5
     try:
-        init.fetch(existing=existing)
+        fetched_ds = init.fetch(existing=existing)
         # if in network mode, and the datasource is local
         # then work was done at that stage.
         if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
@@ -362,7 +362,12 @@ def main_init(name, args):
     init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
 
     if mode == sources.DSMODE_LOCAL:
-        if init.datasource.dsmode != mode:
+        if fetched_ds and hasattr(fetched_ds, 'network_config'):
+            # local mode with ds that returned data and has netcfg
+            # let's try to run some mods early \o/
+            LOG.debug("WARK: datasource %s in local mode but has "
+                      "networking, trying to run mods.", init.datasource)
+        elif init.datasource.dsmode != mode:
             LOG.debug("[%s] Exiting. datasource %s not in local mode.",
                       mode, init.datasource)
             return (init.datasource, [])
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 6813f53..362e43d 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -75,6 +75,8 @@ schema = {
 
 __doc__ = get_schema_doc(schema)  # Supplement python help()
 
+CC_KEYS = ['bootcmd']
+
 
 def handle(name, cfg, cloud, log, _args):
 
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 64bc900..3f4a578 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -45,6 +45,7 @@ CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
 CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
 
 distros = ['ubuntu', 'debian']
+CC_KEYS = ['ca-certs']
 
 
 def update_ca_certs():
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 29e192e..61490fd 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -116,6 +116,7 @@ WIPEFS_CMD = util.which("wipefs")
 
 LANG_C_ENV = {'LANG': 'C'}
 
+CC_KEYS = ['device_aliases', 'disk_setup', 'fs_setup']
 LOG = logging.getLogger(__name__)
 
 
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 564f376..fbfb758 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -80,6 +80,8 @@ DEFAULT_CONFIG = {
     'ignore_growroot_disabled': False,
 }
 
+CC_KEYS = ['growpart']
+
 
 class RESIZE(object):
     SKIPPED = "SKIPPED"
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index d5f63f5..3af8a47 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -72,7 +72,7 @@ def configure(config, server_cfg=SERVER_CFG,
     # Read server.cfg (if it exists) values from the
     # original file in order to be able to mix the rest up.
     try:
-        old_contents = util.load_file(server_cfg, quiet=False, decode=False)
+        old_contents = util.load_file(server_cfg, strict=True, decode=False)
         mcollective_config = ConfigObj(BytesIO(old_contents))
     except IOError as e:
         if e.errno != errno.ENOENT:
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 3995704..e4134c2 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -34,6 +34,7 @@ from cloudinit import util
 
 from cloudinit.settings import PER_ALWAYS
 
+CC_KEYS = ['migrate']
 frequency = PER_ALWAYS
 
 
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 123ffb8..d7e3570 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -71,6 +71,8 @@ import re
 from cloudinit import type_utils
 from cloudinit import util
 
+CC_KEYS = ['mounts', 'mount_default_fields', 'swap']
+
 # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
 DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
 DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
@@ -277,7 +279,7 @@ def handle_swapcfg(swapcfg):
 
     if not (size and fname):
         LOG.debug("no need to setup swap")
-        return
+        return None
 
     if os.path.exists(fname):
         if not os.path.exists("/proc/swaps"):
@@ -319,7 +321,7 @@ def handle(_name, cfg, cloud, log, _args):
     defvals = cfg.get("mount_default_fields", defvals)
 
     # these are our default set of mounts
-    defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
+    defmnts = [["ephemeral0", "/mnt", defvals[2], defvals[3], "0", "2"],
                ["swap", "none", "swap", "sw", "0", "0"]]
 
     cfgmnt = []
@@ -442,9 +444,11 @@ def handle(_name, cfg, cloud, log, _args):
     need_mount_all = False
     dirs = []
     for line in actlist:
+        log.debug("fstab entry: %s", line)
         # write 'comment' in the fs_mntops, entry,  claiming this
         line[3] = "%s,%s" % (line[3], MNT_COMMENT)
         if line[2] == "swap":
+            log.debug("WARK: enabled swap")
             needswap = True
         if line[1].startswith("/"):
             dirs.append(line[1])
@@ -461,6 +465,7 @@ def handle(_name, cfg, cloud, log, _args):
         # If any of them does not already show up in the list of current
         # mount points, we will definitely need to do mount -a.
         if not need_mount_all and d not in mount_points:
+            log.debug("enabled mount all: %s not in %s", d, mount_points)
             need_mount_all = True
 
     sadds = [WS.sub(" ", n) for n in cc_lines]
@@ -474,19 +479,18 @@ def handle(_name, cfg, cloud, log, _args):
     util.write_file(FSTAB_PATH, contents)
 
     activate_cmds = []
-    if needswap:
-        activate_cmds.append(["swapon", "-a"])
-
     if len(sops) == 0:
         log.debug("No changes to /etc/fstab made.")
     else:
         log.debug("Changes to fstab: %s", sops)
         need_mount_all = True
+        if needswap:
+            activate_cmds.append(["swapon", "-a"])
 
     if need_mount_all:
-        activate_cmds.append(["mount", "-a"])
         if uses_systemd:
             activate_cmds.append(["systemctl", "daemon-reload"])
+        activate_cmds.append(["mount", "-a"])
 
     fmt = "Activating swap and mounts with: %s"
     for cmd in activate_cmds:
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index afd2e06..6aa1d5b 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -55,6 +55,8 @@ schema = {
     }
 }
 
+CC_KEYS = ['resize_rootfs']
+
 __doc__ = get_schema_doc(schema)  # Supplement python help()
 
 
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 22b1753..91f7564 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -198,6 +198,7 @@ KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
 KEYNAME_LEGACY_DIR = 'rsyslog_dir'
 KEYNAME_REMOTES = 'remotes'
 
+CC_KEYS = ['rsyslog', 'rsyslog_dir', 'rsyslog_filename']
 LOG = logging.getLogger(__name__)
 
 COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 65f6e77..b462374 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -70,6 +70,7 @@ from cloudinit import util
 
 frequency = PER_INSTANCE
 LOG = logging.getLogger(__name__)
+CC_KEYS = ['random_seed']
 
 
 def _decode(data, encoding=None):
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 3d2b2da..8651a32 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -38,6 +38,8 @@ import os
 from cloudinit.atomic_helper import write_json
 from cloudinit import util
 
+CC_KEYS = ['preserve_hostname', 'fqdn', 'hostname']
+
 
 class SetHostnameError(Exception):
     """Raised when the distro runs into an exception when setting hostname.
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index fdd8f4d..8251898 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -11,41 +11,8 @@ SSH
 ---
 **Summary:** configure ssh and ssh keys
 
-This module handles most configuration for ssh and ssh keys. Many images have
-default ssh keys, which can be removed using ``ssh_deletekeys``. Since removing
-default keys is usually the desired behavior this option is enabled by default.
-
-Keys can be added using the ``ssh_keys`` configuration key. The argument to
-this config key should be a dictionary entries for the public and private keys
-of each desired key type. Entries in the ``ssh_keys`` config dict should
-have keys in the format ``<key type>_private`` and ``<key type>_public``, e.g.
-``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported key
-types. Not all key types have to be specified, ones left unspecified will not
-be used. If this config option is used, then no keys will be generated.
-
-.. note::
-    when specifying private keys in cloud-config, care should be taken to
-    ensure that the communication between the data source and the instance is
-    secure
-
-.. note::
-    to specify multiline private keys, use yaml multiline syntax
-
-If no keys are specified using ``ssh_keys``, then keys will be generated using
-``ssh-keygen``. By default one public/private pair of each supported key type
-will be generated. The key types to generate can be specified using the
-``ssh_genkeytypes`` config flag, which accepts a list of key types to use. For
-each key type for which this module has been instructed to create a keypair, if
-a key of the same type is already present on the system (i.e. if
-``ssh_deletekeys`` was false), no key will be generated.
-
-Supported key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config
-flags are:
-
-    - rsa
-    - dsa
-    - ecdsa
-    - ed25519
+This module handles publishing ssh host keys, disabling root login, and
+installing authorized public keys for the default user.
 
 Root login can be enabled/disabled using the ``disable_root`` config key. Root
 login options can be manually specified with ``disable_root_opts``. If
@@ -59,6 +26,16 @@ Authorized keys for the default user/first user defined in ``users`` can be
 specified using `ssh_authorized_keys``. Keys should be specified as a list of
 public keys.
 
+On some platforms, the host ssh keys can be published to the platform so
+that users can verify host ssh fingerprints when connecting to new instances.
+This operation is a no-op on platforms which do not support publishing
+host keys.  The ``ssh_publish_hostkeys`` config controls this behavior.
+Publishing is enabled by default; to disable it, provide a config with
+``ssh_publish_hostkeys: {enabled: false}``.  Users may also control which
+key types are published.  To skip publishing a host key type, add the
+key type (rsa, dsa, etc.) to the ``blacklist`` list under the
+``ssh_publish_hostkeys`` config.
+
 .. note::
     see the ``cc_set_passwords`` module documentation to enable/disable ssh
     password authentication
@@ -71,21 +48,6 @@ public keys.
 
 **Config keys**::
 
-    ssh_deletekeys: <true/false>
-    ssh_keys:
-        rsa_private: |
-            -----BEGIN RSA PRIVATE KEY-----
-            MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
-            ...
-            -----END RSA PRIVATE KEY-----
-        rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
-        dsa_private: |
-            -----BEGIN DSA PRIVATE KEY-----
-            MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
-            ...
-            -----END DSA PRIVATE KEY-----
-        dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
-    ssh_genkeytypes: <key type>
     disable_root: <true/false>
     disable_root_opts: <disable root options string>
     ssh_authorized_keys:
@@ -95,94 +57,20 @@ public keys.
         enabled: <true/false> (Defaults to true)
         blacklist: <list of key types> (Defaults to [dsa])
 """
-
-import glob
-import os
-import sys
-
 from cloudinit.distros import ug_util
 from cloudinit import ssh_util
 from cloudinit import util
 
-
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+CC_KEYS = ['disable_root', 'disable_root_opts', 'ssh_authorized_keys',
+           'ssh_publish_hostkeys']
 PUBLISH_HOST_KEYS = True
 # Don't publish the dsa hostkey by default since OpenSSH recommends not using
 # it.
 HOST_KEY_PUBLISH_BLACKLIST = ['dsa']
 
-CONFIG_KEY_TO_FILE = {}
-PRIV_TO_PUB = {}
-for k in GENERATE_KEY_NAMES:
-    CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
-    CONFIG_KEY_TO_FILE.update(
-        {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
-    PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
-
-KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
-
 
 def handle(_name, cfg, cloud, log, _args):
 
-    # remove the static keys from the pristine image
-    if cfg.get("ssh_deletekeys", True):
-        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
-        for f in glob.glob(key_pth):
-            try:
-                util.del_file(f)
-            except Exception:
-                util.logexc(log, "Failed deleting key file %s", f)
-
-    if "ssh_keys" in cfg:
-        # if there are keys in cloud-config, use them
-        for (key, val) in cfg["ssh_keys"].items():
-            if key in CONFIG_KEY_TO_FILE:
-                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
-                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
-                util.write_file(tgt_fn, val, tgt_perms)
-
-        for (priv, pub) in PRIV_TO_PUB.items():
-            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
-                continue
-            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
-            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
-            try:
-                # TODO(harlowja): Is this guard needed?
-                with util.SeLinuxGuard("/etc/ssh", recursive=True):
-                    util.subp(cmd, capture=False)
-                log.debug("Generated a key for %s from %s", pair[0], pair[1])
-            except Exception:
-                util.logexc(log, "Failed generated a key for %s from %s",
-                            pair[0], pair[1])
-    else:
-        # if not, generate them
-        genkeys = util.get_cfg_option_list(cfg,
-                                           'ssh_genkeytypes',
-                                           GENERATE_KEY_NAMES)
-        lang_c = os.environ.copy()
-        lang_c['LANG'] = 'C'
-        for keytype in genkeys:
-            keyfile = KEY_FILE_TPL % (keytype)
-            if os.path.exists(keyfile):
-                continue
-            util.ensure_dir(os.path.dirname(keyfile))
-            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
-
-            # TODO(harlowja): Is this guard needed?
-            with util.SeLinuxGuard("/etc/ssh", recursive=True):
-                try:
-                    out, err = util.subp(cmd, capture=True, env=lang_c)
-                    sys.stdout.write(util.decode_binary(out))
-                except util.ProcessExecutionError as e:
-                    err = util.decode_binary(e.stderr).lower()
-                    if (e.exit_code == 1 and
-                            err.lower().startswith("unknown key")):
-                        log.debug("ssh-keygen: unknown key type '%s'", keytype)
-                    else:
-                        util.logexc(log, "Failed generating key type %s to "
-                                    "file %s", keytype, keyfile)
-
     if "ssh_publish_hostkeys" in cfg:
         host_key_blacklist = util.get_cfg_option_list(
             cfg["ssh_publish_hostkeys"], "blacklist",
@@ -241,7 +129,8 @@ def get_public_host_keys(blacklist=None):
     @returns: List of keys, each formatted as a two-element tuple.
         e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]
     """
-    public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,)
+    import glob
+    public_key_file_tmpl = '%s.pub' % ('/etc/ssh/ssh_host_%s_key',)
     key_list = []
     blacklist_files = []
     if blacklist:
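
To illustrate the ``ssh_publish_hostkeys`` behavior described in the module
docstring above, here is a small self-contained sketch (not part of the patch;
the real module uses util.get_cfg_option_list and get_public_host_keys):

    DEFAULT_BLACKLIST = ['dsa']

    def keys_to_publish(cfg, available_key_types):
        # Mirror the enable/blacklist semantics documented above.
        pub_cfg = cfg.get('ssh_publish_hostkeys', {})
        if not pub_cfg.get('enabled', True):
            return []
        blacklist = pub_cfg.get('blacklist', DEFAULT_BLACKLIST)
        return [k for k in available_key_types if k not in blacklist]

    # keys_to_publish({}, ['rsa', 'dsa', 'ecdsa']) == ['rsa', 'ecdsa']
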
diff --git a/cloudinit/config/cc_ssh_host_keys.py b/cloudinit/config/cc_ssh_host_keys.py
new file mode 100644
index 0000000..ea3b162
--- /dev/null
+++ b/cloudinit/config/cc_ssh_host_keys.py
@@ -0,0 +1,189 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""
+SSH Host Keys
+-------------
+**Summary:** configure ssh host key generation
+
+This module handles removing and generating host ssh keys. Many images have
+default ssh keys, which can be removed using ``ssh_deletekeys``. Since removing
+default keys is usually the desired behavior, this option is enabled by default.
+
+Keys can be added using the ``ssh_keys`` configuration key. The argument to
+this config key should be a dictionary containing entries for the public and
+private keys of each desired key type. Entries in the ``ssh_keys`` dict should
+have keys in the format ``<key type>_private`` and ``<key type>_public``, e.g.
+``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported key
+types. Not all key types have to be specified, ones left unspecified will not
+be used. If this config option is used, then no keys will be generated.
+
+.. note::
+    when specifying private keys in cloud-config, care should be taken to
+    ensure that the communication between the data source and the instance is
+    secure
+
+.. note::
+    to specify multiline private keys, use yaml multiline syntax
+
+If no keys are specified using ``ssh_keys``, then keys will be generated using
+``ssh-keygen``. By default one public/private pair of each supported key type
+will be generated. The key types to generate can be specified using the
+``ssh_genkeytypes`` config flag, which accepts a list of key types to use. For
+each key type for which this module has been instructed to create a keypair, if
+a key of the same type is already present on the system (i.e. if
+``ssh_deletekeys`` was false), no key will be generated.
+
+Supported key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config
+flags are:
+
+    - rsa
+    - dsa
+    - ecdsa
+    - ed25519
+
+The ``ssh_early_start`` boolean, if enabled, will activate ssh and additional
+login services as soon as possible.  This allows users to ssh into an
+instance much sooner than would otherwise be possible.  It can be disabled
+by setting ``ssh_early_start`` to false.
+
+
+**Internal name:** ``cc_ssh_host_keys``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+    ssh_keys:
+        rsa_private: |
+            -----BEGIN RSA PRIVATE KEY-----
+            MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+            ...
+            -----END RSA PRIVATE KEY-----
+        rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+        dsa_private: |
+            -----BEGIN DSA PRIVATE KEY-----
+            MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+            ...
+            -----END DSA PRIVATE KEY-----
+        dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+    ssh_genkeytypes: <key type>
+    ssh_early_start: <bool>
+
+"""
+
+import glob
+import os
+import sys
+
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+CC_KEYS = ['ssh_keys', 'ssh_genkeytypes', 'ssh_early_start']
+
+GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
+KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+CONFIG_KEY_TO_FILE = {}
+PRIV_TO_PUB = {}
+for k in GENERATE_KEY_NAMES:
+    CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
+    CONFIG_KEY_TO_FILE.update(
+        {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
+    PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
+
+KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
+
+
+def handle(_name, cfg, cloud, log, _args):
+
+    # remove the static keys from the pristine image if told to
+    if cfg.get("ssh_deletekeys", True):
+        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
+        for f in glob.glob(key_pth):
+            try:
+                util.del_file(f)
+            except Exception:
+                util.logexc(log, "Failed deleting key file %s", f)
+
+    util.ensure_dir(os.path.dirname(KEY_FILE_TPL))
+    updated_keys = False
+    if "ssh_keys" in cfg:
+        # if there are keys in cloud-config, use them
+        for (key, val) in cfg["ssh_keys"].items():
+            if key in CONFIG_KEY_TO_FILE:
+                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
+                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
+                util.write_file(tgt_fn, val, tgt_perms)
+                updated_keys = True
+
+        for (priv, pub) in PRIV_TO_PUB.items():
+            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
+                continue
+            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
+            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
+            try:
+                # TODO(harlowja): Is this guard needed?
+                with util.SeLinuxGuard("/etc/ssh", recursive=True):
+                    util.subp(cmd, capture=False)
+                log.debug("Generated a key for %s from %s", pair[0], pair[1])
+            except Exception:
+                util.logexc(log, "Failed generated a key for %s from %s",
+                            pair[0], pair[1])
+
+    else:
+        # if not, generate them
+        genkeys = util.get_cfg_option_list(cfg,
+                                           'ssh_genkeytypes',
+                                           GENERATE_KEY_NAMES)
+        lang_c = os.environ.copy()
+        lang_c['LANG'] = 'C'
+        for keytype in genkeys:
+            keyfile = KEY_FILE_TPL % (keytype)
+            if os.path.exists(keyfile):
+                continue
+            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+            # TODO(harlowja): Is this guard needed?
+            with util.SeLinuxGuard("/etc/ssh", recursive=True):
+                try:
+                    out, err = util.subp(cmd, capture=True, env=lang_c)
+                    sys.stdout.write(util.decode_binary(out))
+                    updated_keys = True
+                except util.ProcessExecutionError as e:
+                    err = util.decode_binary(e.stderr).lower()
+                    if (e.exit_code == 1 and
+                            err.lower().startswith("unknown key")):
+                        LOG.debug("ssh-keygen: unknown key type '%s'", keytype)
+                    else:
+                        util.logexc(LOG, "Failed generating key type %s to "
+                                    "file %s", keytype, keyfile)
+
+    # SRU Blocker, this shouldn't be enabled by default
+    if cfg.get("ssh_early_start", True):
+        ssh_services = ['ssh', 'systemd-user-sessions', 'systemd-logind']
+
+        # check if ssh has started yet and start ssh if we generated keys
+        # or keys exist but ssh isn't yet running.
+        out, _err = util.subp(['systemctl', 'show', 'ssh.service',
+                               '-p', 'ActiveState,SubState'], capture=True)
+        LOG.debug('WARK: ssh.service status: %s', out)
+        if 'ActiveState=active' not in out:
+            LOG.debug('WARK: starting up early ssh access')
+            for service in ssh_services:
+                cmd = ['systemctl', '--job-mode=ignore-dependencies',
+                       '--no-block', 'start', '%s.service' % service]
+                util.subp(cmd, capture=True)
+
+        # we may have started ssh early with older host keys, so reload/restart
+        # if this is the case.
+        elif 'running' in out and updated_keys:
+            LOG.debug('WARK: reloading early ssh service')
+            for service in ssh_services:
+                cmd = ['systemctl', '--job-mode=ignore-dependencies',
+                       '--no-block', 'reload-or-restart',
+                       '%s.service' % service]
+                util.subp(cmd, capture=True)
+
+
+# vi: ts=4 expandtab
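
As a side note on the early-start logic above: the module greps the output of
``systemctl show -p ActiveState,SubState``.  A rough standalone sketch of
parsing that ``Key=Value`` output (not part of the patch):

    def parse_systemctl_show(output):
        # Turn 'ActiveState=active\nSubState=running' style output into a dict.
        props = {}
        for line in output.splitlines():
            if '=' in line:
                key, _, value = line.partition('=')
                props[key] = value
        return props

    # parse_systemctl_show('ActiveState=active\nSubState=running')
    # -> {'ActiveState': 'active', 'SubState': 'running'}
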
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index c96eede..13c3a24 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -54,6 +54,7 @@ from cloudinit import util
 
 from cloudinit.settings import PER_ALWAYS
 
+CC_KEYS = ['manage_etc_hosts', 'fqdn', 'hostname']
 frequency = PER_ALWAYS
 
 
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index d5f4eb5..a088714 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -37,6 +37,7 @@ from cloudinit.settings import PER_ALWAYS
 from cloudinit import util
 
 frequency = PER_ALWAYS
+CC_KEYS = ['preserve_hostname', 'fqdn', 'hostname']
 
 
 def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index c32a743..c4cc398 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -128,6 +128,7 @@ from cloudinit import log as logging
 
 from cloudinit.settings import PER_INSTANCE
 
+CC_KEYS = ['groups', 'users']
 LOG = logging.getLogger(__name__)
 
 frequency = PER_INSTANCE
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 0b6546e..8d109ac 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -64,6 +64,7 @@ from cloudinit.settings import PER_INSTANCE
 from cloudinit import util
 
 
+CC_KEYS = ['write_files']
 frequency = PER_INSTANCE
 
 DEFAULT_OWNER = "root:root"
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
index e778984..5767034 100644
--- a/cloudinit/config/tests/test_ssh.py
+++ b/cloudinit/config/tests/test_ssh.py
@@ -75,8 +75,7 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_no_cfg(self, m_path_exists, m_nug,
-                           m_glob, m_setup_keys):
+    def handle_no_cfg(self, m_path_exists, m_nug, m_glob, m_setup_keys):
         """Test handle with no config ignores generating existing keyfiles."""
         cfg = {}
         keys = ["key1"]
@@ -103,8 +102,8 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
-                                            m_glob, m_setup_keys):
+    def handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
+                                       m_glob, m_setup_keys):
         """Test handle with no config and a default distro user."""
         cfg = {}
         keys = ["key1"]
@@ -126,8 +125,8 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
-                                                   m_glob, m_setup_keys):
+    def handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
+                                              m_glob, m_setup_keys):
         """Test handle with explicit disable_root and a default distro user."""
         # This test is identical to test_handle_no_cfg_and_default_root,
         # except this uses an explicit cfg value
@@ -151,8 +150,8 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
-                                             m_glob, m_setup_keys):
+    def handle_cfg_without_disable_root(self, m_path_exists, m_nug,
+                                        m_glob, m_setup_keys):
         """Test handle with disable_root == False."""
         # When disable_root == False, the ssh redirect for root is skipped
         cfg = {"disable_root": False}
@@ -174,7 +173,7 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_default(
+    def handle_publish_hostkeys_default(
             self, m_path_exists, m_nug, m_glob, m_setup_keys):
         """Test handle with various configs for ssh_publish_hostkeys."""
         self._publish_hostkey_test_setup()
@@ -203,7 +202,7 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_config_enable(
+    def handle_publish_hostkeys_config_enable(
             self, m_path_exists, m_nug, m_glob, m_setup_keys):
         """Test handle with various configs for ssh_publish_hostkeys."""
         self._publish_hostkey_test_setup()
@@ -232,7 +231,7 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_config_disable(
+    def handle_publish_hostkeys_config_disable(
             self, m_path_exists, m_nug, m_glob, m_setup_keys):
         """Test handle with various configs for ssh_publish_hostkeys."""
         self._publish_hostkey_test_setup()
@@ -259,7 +258,7 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_config_blacklist(
+    def handle_publish_hostkeys_config_blacklist(
             self, m_path_exists, m_nug, m_glob, m_setup_keys):
         """Test handle with various configs for ssh_publish_hostkeys."""
         self._publish_hostkey_test_setup()
@@ -289,7 +288,7 @@ class TestHandleSsh(CiTestCase):
     @mock.patch(MODPATH + "glob.glob")
     @mock.patch(MODPATH + "ug_util.normalize_users_groups")
     @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_empty_blacklist(
+    def handle_publish_hostkeys_empty_blacklist(
             self, m_path_exists, m_nug, m_glob, m_setup_keys):
         """Test handle with various configs for ssh_publish_hostkeys."""
         self._publish_hostkey_test_setup()
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 00bdee3..1b9829f 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -15,7 +15,6 @@ from six import StringIO
 import abc
 import os
 import re
-import stat
 
 from cloudinit import importer
 from cloudinit import log as logging
@@ -796,11 +795,6 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
 
 
 def uses_systemd():
-    try:
-        res = os.lstat('/run/systemd/system')
-        return stat.S_ISDIR(res.st_mode)
-    except Exception:
-        return False
-
+    return util.uses_systemd()
 
 # vi: ts=4 expandtab
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index ea707c0..8e2a1c5 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -9,6 +9,7 @@ import errno
 import logging
 import os
 import re
+import time
 from functools import partial
 
 from cloudinit.net.network_state import mask_to_net_prefix
@@ -97,6 +98,21 @@ def is_up(devname):
     return read_sys_net_safe(devname, "operstate", translate=translate)
 
 
+def networking_is_active():
+    if util.uses_systemd():
+        try:
+            out, _err = util.subp(['systemctl', 'show', '-p', 'ActiveState',
+                                   'systemd-networkd.service'], capture=True)
+            if out:
+                show = util.load_shell_content(out)
+                if show['ActiveState'] == "active":
+                    return True
+        except Exception:
+            pass
+
+    return False
+
+
 def is_wireless(devname):
     return os.path.exists(sys_dev_path(devname, "wireless"))
 
@@ -333,6 +349,58 @@ def extract_physdevs(netcfg):
     raise RuntimeError('Unknown network config version: %s' % version)
 
 
+def wait_online(devices, ignore=None, mode='any', timeout=10.0):
+    """Wait for up to timeout seconds and collect 'is_up' result for each
+    device.  If mode is 'any' return True if one or more is up.  If mode is
+    'all' only return True if all devices are up.
+
+    :param devices: list of network device names, e.g. ens3 (all if empty)
+    :param ignore: list of device names to skip; defaults to ['lo']
+    :param mode: 'all' requires every device to be up to return success;
+        'any' returns success if one or more devices are up. Default: 'any'.
+    :param timeout: float: number of seconds to wait for success
+    :returns: tuple: (boolean, list of last results, list of devices)
+    :raises: ValueError if mode is not valid, or if timeout is not castable
+        to type float.
+    """
+    if not devices:
+        devices = get_devicelist()
+
+    if ignore is None:
+        ignore = ['lo']
+
+    if mode not in ['any', 'all']:
+        raise ValueError("'mode' must be one of 'all' or 'any': %s" % mode)
+
+    if not isinstance(timeout, float):
+        timeout = float(timeout)
+
+    devs = [dev for dev in devices if dev not in ignore]
+    msg = ('waiting up to %s seconds for %s of %s '
+           'to be up, ignoring %s' % (timeout, mode, devices, ignore))
+
+    return util.log_time(LOG.debug, msg, _wait_timeout,
+                         [is_up, devs, mode, timeout])
+
+
+def _wait_timeout(check_func, data, mode, timeout):
+    start = time.time()
+    while True:
+        results = list(map(check_func, data))
+        if mode == 'any':
+            if any(results):
+                return (True, results, data)
+        if mode == 'all':
+            if all(results):
+                return (True, results, data)
+        time.sleep(0.1)
+        if (time.time() - start) >= timeout:
+            LOG.debug('Timed out waiting for devices to come up')
+            break
+
+    return (False, results, data)
+
+
 def wait_for_physdevs(netcfg, strict=True):
     physdevs = extract_physdevs(netcfg)
 
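
A brief usage sketch for the ``wait_online`` helper added above (assumes this
branch; the interface names are hypothetical):

    from cloudinit import net

    # Wait up to 5 seconds for either interface to report 'up'; returns a
    # (success, per-device results, devices checked) tuple.
    up, results, devs = net.wait_online(['ens3', 'ens4'], mode='any',
                                        timeout=5.0)
    if up:
        print('at least one of %s is up' % devs)
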
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 4984fa8..fd8f546 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -230,6 +230,9 @@ BUILTIN_DS_CONFIG = {
 # RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
 
 BUILTIN_CLOUD_CONFIG = {
+    'dynamic_module_order': True,
+    'ssh_early_start': True,
+    'resize_rootfs': 'noblock',
     'disk_setup': {
         'ephemeral0': {'table_type': 'gpt',
                        'layout': [100],
@@ -1257,7 +1260,7 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
     # now update ds_cfg to reflect contents pass in config
     if source is None:
         return None
-    seed = util.load_file(source, quiet=True, decode=False)
+    seed = util.load_file(source, strict=False, decode=False)
 
     # The seed generally contains non-Unicode characters. load_file puts
     # them into a str (in python 2) or bytes (in python 3). In python 2,
@@ -1381,7 +1384,9 @@ def get_metadata_from_imds(fallback_nic, retries):
     kwargs = {'logfunc': LOG.debug,
               'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
               'func': _get_metadata_from_imds, 'args': (retries,)}
-    if net.is_up(fallback_nic):
+    status = net.wait_online([])
+    if status[0]:
+        LOG.debug('Network is already up, skipping EphemeralDHCP')
         return util.log_time(**kwargs)
     else:
         try:
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 4a01524..672679b 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -7,6 +7,7 @@
 import time
 
 from cloudinit import log as logging
+from cloudinit import net
 from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
 from cloudinit import sources
 from cloudinit import url_helper
@@ -127,14 +128,20 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
             return False
 
         if self.perform_dhcp_setup:  # Setup networking in init-local stage.
-            try:
-                with EphemeralDHCPv4(self.fallback_interface):
-                    results = util.log_time(
-                        logfunc=LOG.debug, msg='Crawl of metadata service',
-                        func=self._crawl_metadata)
-            except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
-                util.logexc(LOG, str(e))
-                return False
+            kwargs = {'logfunc': LOG.debug, 'msg': 'Crawl of metadata service',
+                      'func': self._crawl_metadata}
+            results = None
+            status = net.wait_online([])
+            if status[0]:
+                LOG.debug('Network is already up, skipping EphemeralDHCP')
+                results = util.log_time(**kwargs)
+            if not results:
+                try:
+                    with EphemeralDHCPv4(self.fallback_interface):
+                        results = util.log_time(**kwargs)
+                except (NoDHCPLeaseError, sources.InvalidMetaDataException):
+                    util.logexc(LOG, "Error during EphemeralDHCP imds crawl")
+                    return False
         else:
             try:
                 results = self._crawl_metadata()
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 5012988..bdde0ed 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -817,7 +817,9 @@ class Modules(object):
         # and which ones failed + the exception of why it failed
         failures = []
         which_ran = []
-        for (mod, name, freq, args) in mostly_mods:
+        # see if we can reorder these for faster ssh
+        ordered_mods = _earlyssh_module_order(mostly_mods, self.cfg)
+        for (mod, name, freq, args) in ordered_mods:
             try:
                 # Try the modules frequency, otherwise fallback to a known one
                 if not freq:
@@ -956,4 +958,81 @@ def _pkl_load(fname):
         util.logexc(LOG, "Failed loading pickled blob from %s", fname)
         return None
 
+
+def _earlyssh_module_order(modules, cfg):
+    """ This function will reorder the sequence in which
+        modules are run, with the goal of enabling ssh as fast
+        as possible without breaking existing behavior.
+
+        Any module that would normally run before ssh_host_keys,
+        users-groups, and ssh keeps its position unless the module
+        either a) has no cloud-config to consume, or
+        b) does not affect /home.
+    """
+    # handle empty
+    if not modules:
+        return modules
+
+    earlyssh_order = [
+        'migrator', 'seed_random', 'ssh_host_keys', 'users-groups', 'ssh',
+        'bootcmd', 'write-files', 'growpart', 'resizefs', 'disk_setup',
+        'mounts', 'set_hostname', 'update_hostname', 'update_etc_hosts',
+        'ca-certs', 'rsyslog']
+
+    # first mod tuple, name element
+    if modules[0][1] != "migrator":
+        LOG.debug('WARK: returning o.g. order, not init_modules')
+        return modules
+
+    name_to_mod = {name: (mod, name, freq, args)
+                   for (mod, name, freq, args) in modules}
+    cfg_set = set(cfg.keys())
+
+    # check if mounts has /home, if so keep original order
+    mods_with_cfg = []
+    for (mod, name, _freq, _args) in modules:
+        if hasattr(mod, 'CC_KEYS'):
+            mod_cc_keys = mod.CC_KEYS
+        else:
+            LOG.debug('WARK: module %s has no CC_KEYS attr', name)
+            mod_cc_keys = []
+
+        has_cfg = list(set(mod_cc_keys).intersection(cfg_set))
+        if len(has_cfg) > 0:
+            LOG.debug('WARK: %s module has cloud-config to consume', name)
+            mods_with_cfg.append(name)
+
+    # modules in this list are always ignored; having config for them
+    # does not force us to keep the original order
+    ignored = ['migrator', 'seed_random', 'ssh_host_keys', 'users-groups',
+               'ssh', 'growpart', 'resizefs', 'disk_setup',
+               'set_hostname', 'update_hostname', 'update_etc_hosts',
+               'ca-certs', 'rsyslog']
+    LOG.debug('WARK: checking if we can ignore mods w/cfg: %s', mods_with_cfg)
+    for mod_name in mods_with_cfg:
+        # we can possibly ignore the mounts config if none of the
+        # mounts include /home.
+        if mod_name in ['mounts']:
+            can_ignore = True
+            for mnt in cfg['mounts']:
+                if '/home' in mnt[1]:
+                    can_ignore = False
+                    break
+            if can_ignore:
+                ignored.append('mounts')
+
+    # excluding ignores, any modules with config?
+    LOG.debug('WARK: mods with cfg: %s ignoring %s', mods_with_cfg, ignored)
+    delta = set(mods_with_cfg).difference(ignored)
+    if len(delta) == 0:
+        LOG.debug('WARK: enabling earlyssh mod order')
+        early = [name_to_mod[name] for name in earlyssh_order
+                 if name in name_to_mod]
+        LOG.debug('WARK: early order-> %s', early)
+        return early
+
+    LOG.debug('WARK: Keeping o.g order, delta=%s', len(delta))
+    return modules
+
+
 # vi: ts=4 expandtab
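
To make the reordering decision above concrete, a standalone illustration of
the CC_KEYS check (the module objects here are stand-ins, not cloud-init's
real module objects):

    # A module "has config" when any of its CC_KEYS appears in the user cfg;
    # the original order is kept if any non-ignored module has config.
    class FakeMod(object):
        def __init__(self, cc_keys):
            self.CC_KEYS = cc_keys

    cfg = {'bootcmd': ['echo hi'], 'hostname': 'node1'}
    mods = {'bootcmd': FakeMod(['bootcmd']),
            'set_hostname': FakeMod(['preserve_hostname', 'fqdn', 'hostname'])}
    mods_with_cfg = [name for name, mod in mods.items()
                     if set(mod.CC_KEYS) & set(cfg)]
    # -> ['bootcmd', 'set_hostname']; 'bootcmd' is not in the ignored list,
    # so this cfg would keep the original module order.
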
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 23fddd0..65e82a3 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -6,7 +6,9 @@ import functools
 import httpretty
 import logging
 import os
+import random
 import shutil
+import string
 import sys
 import tempfile
 import time
@@ -122,6 +124,12 @@ class TestCase(unittest2.TestCase):
             parser.readfp(contents)
         return parser
 
+    @classmethod
+    def random_string(cls, length=8):
+        """ return a random lowercase string with default length of 8"""
+        return ''.join(
+            random.choice(string.ascii_lowercase) for _ in range(length))
+
 
 class CiTestCase(TestCase):
     """This is the preferred test case base class unless user
diff --git a/cloudinit/util.py b/cloudinit/util.py
index aa23b3f..2050a79 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1352,19 +1352,21 @@ def uniq_list(in_list):
     return out_list
 
 
-def load_file(fname, read_cb=None, quiet=False, decode=True):
-    LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
+def load_file(fname, read_cb=None, verbose=False, strict=True, decode=True):
+    if verbose:
+        LOG.debug("Reading from %s (strict=%s)", fname, strict)
     ofh = six.BytesIO()
     try:
         with open(fname, 'rb') as ifh:
             pipe_in_out(ifh, ofh, chunk_cb=read_cb)
     except IOError as e:
-        if not quiet:
+        if strict:
             raise
         if e.errno != ENOENT:
             raise
     contents = ofh.getvalue()
-    LOG.debug("Read %s bytes from %s", len(contents), fname)
+    if verbose:
+        LOG.debug("Read %s bytes from %s", len(contents), fname)
     if decode:
         return decode_binary(contents)
     else:
@@ -2565,7 +2567,7 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
     ret = {}
     for f in required + optional:
         try:
-            ret[f] = load_file(base + delim + f, quiet=False, decode=False)
+            ret[f] = load_file(base + delim + f, strict=True, decode=False)
         except IOError as e:
             if e.errno != ENOENT:
                 raise
@@ -2759,7 +2761,7 @@ def system_is_snappy():
     # this is certainly not a perfect test, but good enough for now.
     orpath = "/etc/os-release"
     try:
-        orinfo = load_shell_content(load_file(orpath, quiet=True))
+        orinfo = load_shell_content(load_file(orpath, strict=False))
         if orinfo.get('ID', '').lower() == "ubuntu-core":
             return True
     except ValueError as e:
@@ -2769,7 +2771,7 @@ def system_is_snappy():
     if 'snap_core=' in cmdline:
         return True
 
-    content = load_file("/etc/system-image/channel.ini", quiet=True)
+    content = load_file("/etc/system-image/channel.ini", strict=False)
     if 'ubuntu-core' in content.lower():
         return True
     if os.path.isdir("/etc/system-image/config.d/"):
@@ -2884,7 +2886,7 @@ def get_proc_ppid(pid):
     """
     ppid = 0
     try:
-        contents = load_file("/proc/%s/stat" % pid, quiet=True)
+        contents = load_file("/proc/%s/stat" % pid, strict=False)
     except IOError as e:
         LOG.warning('Failed to load /proc/%s/stat. %s', pid, e)
     if contents:
@@ -2894,4 +2896,13 @@ def get_proc_ppid(pid):
         ppid = int(parts[3])
     return ppid
 
+
+def uses_systemd():
+    try:
+        res = os.lstat('/run/systemd/system')
+        return stat.S_ISDIR(res.st_mode)
+    except Exception:
+        return False
+
+
 # vi: ts=4 expandtab
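
A short usage sketch for the renamed ``load_file`` flags above (assumes this
branch; the path is illustrative):

    from cloudinit import util

    # strict=False mirrors the old quiet=True behavior: a missing file yields
    # empty content instead of raising IOError; strict=True (default) raises.
    content = util.load_file('/run/example/optional-seed', strict=False,
                             decode=False)
    if content:
        print('read %d bytes' % len(content))
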
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 684c747..63c0064 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -61,6 +61,7 @@ cloud_init_modules:
  - ca-certs
  - rsyslog
 {% endif %}
+ - ssh_host_keys
  - users-groups
  - ssh
 
diff --git a/setup.py b/setup.py
index fcaf26f..9cae049 100755
--- a/setup.py
+++ b/setup.py
@@ -250,6 +250,7 @@ data_files = [
     (ETC + '/cloud/templates', glob('templates/*')),
     (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify',
                                     'tools/uncloud-init',
+                                    'tools/ds_customizer',
                                     'tools/write-ssh-key-fingerprints']),
     (USR + '/share/bash-completion/completions',
      ['bash_completion/cloud-init']),
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 45efa24..d72a412 100755
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -105,6 +105,22 @@ check_for_datasource() {
     return 1
 }
 
+datasource_customizer() {
+    dsfound="/run/cloud-init/dsfound"
+    if [ ! -e "${dsfound}" ]; then
+        debug 1 "datasource_customizer not enabled, ${dsfound} not present."
+        return 0
+    fi
+    for ds in $(ls -1 /run/cloud-init/dsfound); do
+        customizer="/usr/lib/cloud-init/ds_customizer"
+        if [ -e "${customizer}" ]; then
+            debug 1 "Running datasource_customizer $ds"
+            ${customizer} $ds
+        fi
+    done
+    return 0
+}
+
 main() {
     local normal_d="$1" early_d="$2" late_d="$3"
     local target_name="multi-user.target" gen_d="$early_d"
@@ -154,6 +170,8 @@ main() {
             fi
         fi
         : > "$RUN_ENABLED_FILE"
+        debug 1 "emitting datasource customization"
+        datasource_customizer
     elif [ "$result" = "$DISABLE" ]; then
         if [ -f "$link_path" ]; then
             if rm -f "$link_path"; then
diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl
index ff9c644..9946954 100644
--- a/systemd/cloud-init-local.service.tmpl
+++ b/systemd/cloud-init-local.service.tmpl
@@ -17,6 +17,7 @@ RequiresMountsFor=/var/lib/cloud
 
 [Service]
 Type=oneshot
+ExecStartPre=/bin/echo "Starting cloud-init-local"
 ExecStart=/usr/bin/cloud-init init --local
 ExecStart=/bin/touch /run/cloud-init/network-config-ready
 RemainAfterExit=yes
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 3547dd9..dd85c85 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -163,18 +163,18 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
 
     @mock.patch(MOCKPATH + 'readurl')
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-    @mock.patch(MOCKPATH + 'net.is_up')
+    @mock.patch(MOCKPATH + 'net.wait_online')
     def test_get_metadata_does_not_dhcp_if_network_is_up(
             self, m_net_is_up, m_dhcp, m_readurl):
         """Do not perform DHCP setup when nic is already up."""
-        m_net_is_up.return_value = True
+        m_net_is_up.return_value = (True,)
         m_readurl.return_value = url_helper.StringResponse(
             json.dumps(NETWORK_METADATA).encode('utf-8'))
         self.assertEqual(
             NETWORK_METADATA,
             dsaz.get_metadata_from_imds('eth9', retries=3))
 
-        m_net_is_up.assert_called_with('eth9')
+        m_net_is_up.assert_called_with([])
         m_dhcp.assert_not_called()
         self.assertIn(
             "Crawl of Azure Instance Metadata Service (IMDS) took",  # log_time
@@ -182,11 +182,11 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
 
     @mock.patch(MOCKPATH + 'readurl')
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
-    @mock.patch(MOCKPATH + 'net.is_up')
+    @mock.patch(MOCKPATH + 'net.wait_online')
     def test_get_metadata_performs_dhcp_when_network_is_down(
             self, m_net_is_up, m_dhcp, m_readurl):
         """Perform DHCP setup when nic is not up."""
-        m_net_is_up.return_value = False
+        m_net_is_up.return_value = (False,)
         m_readurl.return_value = url_helper.StringResponse(
             json.dumps(NETWORK_METADATA).encode('utf-8'))
 
@@ -194,7 +194,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
             NETWORK_METADATA,
             dsaz.get_metadata_from_imds('eth9', retries=2))
 
-        m_net_is_up.assert_called_with('eth9')
+        m_net_is_up.assert_called_with([])
         m_dhcp.assert_called_with(mock.ANY, 'eth9')
         self.assertIn(
             "Crawl of Azure Instance Metadata Service (IMDS) took",  # log_time
@@ -206,7 +206,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
             timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS)
 
     @mock.patch('cloudinit.url_helper.time.sleep')
-    @mock.patch(MOCKPATH + 'net.is_up')
+    @mock.patch(MOCKPATH + 'net.wait_online')
     def test_get_metadata_from_imds_empty_when_no_imds_present(
             self, m_net_is_up, m_sleep):
         """Return empty dict when IMDS network metadata is absent."""
@@ -215,11 +215,11 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
             dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
             body={}, status=404)
 
-        m_net_is_up.return_value = True  # skips dhcp
+        m_net_is_up.return_value = (True,)  # skips dhcp
 
         self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2))
 
-        m_net_is_up.assert_called_with('eth9')
+        m_net_is_up.assert_called_with([])
         self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
         self.assertIn(
             "Crawl of Azure Instance Metadata Service (IMDS) took",  # log_time
@@ -227,7 +227,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
 
     @mock.patch('requests.Session.request')
     @mock.patch('cloudinit.url_helper.time.sleep')
-    @mock.patch(MOCKPATH + 'net.is_up')
+    @mock.patch(MOCKPATH + 'net.wait_online')
     def test_get_metadata_from_imds_retries_on_timeout(
             self, m_net_is_up, m_sleep, m_request):
         """Retry IMDS network metadata on timeout errors."""
@@ -244,11 +244,11 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
             dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
             body=retry_callback)
 
-        m_net_is_up.return_value = True  # skips dhcp
+        m_net_is_up.return_value = (True,)  # skips dhcp
 
         self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3))
 
-        m_net_is_up.assert_called_with('eth9')
+        m_net_is_up.assert_called_with([])
         self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list)
         self.assertIn(
             "Crawl of Azure Instance Metadata Service (IMDS) took",  # log_time
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index a731f1e..796e374 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -251,9 +251,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
         m_dhcp.assert_not_called()
 
     @hp.activate
+    @test_helpers.mock.patch('cloudinit.net.wait_online')
     @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
     @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-    def test_local_datasource(self, m_dhcp, m_net):
+    def test_local_datasource(self, m_dhcp, m_net, m_wait_on):
         """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
         _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
         ds_os_local = ds.DataSourceOpenStackLocal(
@@ -263,6 +264,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
             'interface': 'eth9', 'fixed-address': '192.168.2.9',
             'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
             'broadcast-address': '192.168.2.255'}]
+        m_wait_on.return_value = (False,)
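+        # wait_online reports the network down, so ephemeral dhcp is expected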
 
         self.assertIsNone(ds_os_local.version)
         mock_path = MOCK_PATH + 'detect_openstack'
@@ -282,6 +284,36 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
         self.assertIsNone(ds_os_local.vendordata_raw)
         m_dhcp.assert_called_with('eth9')
 
+    @hp.activate
+    @test_helpers.mock.patch('cloudinit.net.wait_online')
+    @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+    @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+    def test_local_datasource_net_up(self, m_dhcp, m_net, m_wait_on):
+        """OpenStackLocal skips dhcp when net.is_up and gets instance data."""
+        _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+        ds_os_local = ds.DataSourceOpenStackLocal(
+            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+        ds_os_local._fallback_interface = 'eth9'  # Monkey patch for dhcp
+        m_wait_on.return_value = (True,)
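+        # wait_online reports the network up, so ephemeral dhcp is skipped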
+
+        self.assertIsNone(ds_os_local.version)
+        mock_path = MOCK_PATH + 'detect_openstack'
+        with test_helpers.mock.patch(mock_path) as m_detect_os:
+            m_detect_os.return_value = True
+            found = ds_os_local.get_data()
+        self.assertTrue(found)
+        self.assertEqual(2, ds_os_local.version)
+        md = dict(ds_os_local.metadata)
+        md.pop('instance-id', None)
+        md.pop('local-hostname', None)
+        self.assertEqual(OSTACK_META, md)
+        self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
+        self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
+        self.assertEqual(2, len(ds_os_local.files))
+        self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
+        self.assertIsNone(ds_os_local.vendordata_raw)
+        self.assertEqual(0, m_dhcp.call_count)
+
     def test_bad_datasource_meta(self):
         os_files = copy.deepcopy(OS_FILES)
         for k in list(os_files.keys()):
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
index 0fb160b..93b360d 100644
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -255,7 +255,8 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
             self.assertEqual(fstab_expected_content, fstab_new_content)
         cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
         self.m_util_subp.assert_has_calls([
+            mock.call(['systemctl', 'daemon-reload']),
             mock.call(['mount', '-a']),
-            mock.call(['systemctl', 'daemon-reload'])])
+        ])
 
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
index d67c422..8d6061d 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/test_runs/test_simple_run.py
@@ -1,6 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import copy
+import mock
 import os
 
 
@@ -178,4 +179,88 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
         self.assertTrue(len(failures) == 0)
         self.assertEqual([], which_ran)
 
+
+class TestEarlySshRun(helpers.FilesystemMockingTestCase):
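+    """Check cloud_init_modules ordering with and without ssh_early_start."""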
+
+    with_logs = True
+    maxDiff = None
+
+    def setUp(self):
+        super(TestEarlySshRun, self).setUp()
+        self.new_root = self.tmp_dir()
+        self.replicateTestRoot('simple_ubuntu', self.new_root)
+        self.cfg = {
+            'datasource_list': ['None'],
+            'system_info': {'paths': {'run_dir': self.new_root}},
+            'bootcmd': ['ls /etc'],  # test ALL_DISTROS
+            'write_files': [
+                {
+                    'path': '/etc/blah.ini',
+                    'content': 'blah',
+                    'permissions': 0o755,
+                },
+            ],
+            'cloud_init_modules': ['migrator', 'seed_random', 'bootcmd',
+                                   'write-files', 'growpart', 'users-groups',
+                                   'ssh', 'ssh_host_keys'],
+        }
+        cloud_cfg = util.yaml_dumps(self.cfg)
+        util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
+        util.write_file(os.path.join(self.new_root, 'etc',
+                                     'cloud', 'cloud.cfg'), cloud_cfg)
+        self.patchOS(self.new_root)
+        self.patchUtils(self.new_root)
+
+    @mock.patch('cloudinit.config.cc_bootcmd.handle')
+    def test_modules_keep_original_order_if_cfg_present(self, m_bh):
+        """Allow module reordering if possible.  """
+        initer = stages.Init()
+        initer.read_cfg()
+        initer.initialize()
+        initer.fetch()
+        initer.instancify()
+        initer.update()
+        initer.cloudify().run('consume_data', initer.consume_data,
+                              args=[PER_INSTANCE], freq=PER_INSTANCE)
+
+        mods = stages.Modules(initer)
+        (which_ran, failures) = mods.run_section('cloud_init_modules')
+        print(failures)
+        print(self.logs.getvalue())
+        self.assertTrue(len(failures) == 0)
+        self.assertEqual(self.cfg['cloud_init_modules'], which_ran)
+
+    def test_modules_reorder_for_early_ssh(self):
+        """Reorder modules to start ssh early when ssh_early_start is set."""
+        # re-write cloud.cfg with different cloud config
+        cfg = copy.deepcopy(self.cfg)
+        del cfg['bootcmd']
+        del cfg['write_files']
+        cfg['ssh_early_start'] = True
+        cloud_cfg = util.yaml_dumps(cfg)
+        util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
+        util.write_file(os.path.join(self.new_root, 'etc',
+                                     'cloud', 'cloud.cfg'), cloud_cfg)
+
+        initer = stages.Init()
+        initer.read_cfg()
+        initer.initialize()
+        initer.fetch()
+        initer.instancify()
+        initer.update()
+        initer.cloudify().run('consume_data', initer.consume_data,
+                              args=[PER_INSTANCE], freq=PER_INSTANCE)
+
+        mods = stages.Modules(initer)
+        (which_ran, failures) = mods.run_section('cloud_init_modules')
+        # early ssh pushes some modules up
+        expected_order = [
+            'migrator', 'seed_random', 'ssh_host_keys', 'users-groups',
+            'ssh', 'bootcmd', 'write-files', 'growpart']
+        print(failures)
+        print(self.logs.getvalue())
+        self.assertTrue(len(failures) == 0)
+        self.assertEqual(expected_order, which_ran)
+
+
 # vi: ts=4 expandtab
diff --git a/tools/ds-identify b/tools/ds-identify
index e0d4865..d5784e5 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -1224,11 +1224,18 @@ record_notfound() {
 found() {
     # found(ds1, [ds2 ...], [-- [extra lines]])
     local list="" ds=""
+    local dsmarker="${PATH_RUN_CI}/dsfound"
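+    # record each found datasource as a marker file so later boot tooling
+    # can see which datasource(s) were detected (skipped in report mode)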
+    if [ "$DI_MODE" != "report" ]; then
+        mkdir -p "${dsmarker}"
+    fi
     while [ $# -ne 0 ]; do
         if [ "$1" = "--" ]; then
             shift
             break
         fi
+        if [ "$DI_MODE" != "report" ]; then
+            touch "${dsmarker}/$1"
+        fi
         list="${list:+${list}, }$1"
         shift
     done
diff --git a/tools/ds_customizer b/tools/ds_customizer
new file mode 100755
index 0000000..2c85a94
--- /dev/null
+++ b/tools/ds_customizer
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+early_networking() {
+    # dhcp on all ethernets, netplan style
+    if command -v netplan >/dev/null 2>&1; then
+        CI_NETPLAN="/run/netplan/10-cloud-init-dhcp.yaml"
+        mkdir -p /run/netplan
+        cat >"${CI_NETPLAN}" << EOF
+# This file is generated when cloud-init is enabled to bring up networking
+# as fast as possible.  Cloud-init will render the complete instance
+# network configuration later in boot.
+network:
+  ethernets:
+    en_all:
+      dhcp4: true
+      match:
+        name: e*
+      optional: true
+  version: 2
+EOF
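+        # render backend network configuration from the yaml written above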
+        netplan generate
+
+        # start networkd early, before cloud-init-local
+        SDNET_CONFD="/run/systemd/generator.early/systemd-networkd.service.d"
+        SDNET_EARLY_NETWORKD="${SDNET_CONFD}/override.conf"
+        mkdir -p $SDNET_CONFD
+        cat >"${SDNET_EARLY_NETWORKD}" << EOF
+# generated by cloud-init-generator - ds_customizer
+# NOTE: this does not work, BUG lp:#####
+[Unit]
+DefaultDependencies=no
+After=
+After=systemd-udevd.service local-fs.target
+Before=
+Before=network-pre.target
+Wants=
+Wants=network-pre.target
+[Service]
+ExecStartPre=/bin/echo "WARK: Starting networkd"
+EOF
+        # install an edited copy of the vendor networkd unit into
+        # generator.early so it only orders after systemd-udevd
+        LIB_NETD="/lib/systemd/system/systemd-networkd.service"
+        RUN_NETD="/run/systemd/generator.early/systemd-networkd.service"
+        sed -e 's,After=.*,After=systemd-udevd.service,g' \
+            -e 's,Before=.*,Before=network-pre.target multi-user.target shutdown.target,g' \
+            -e 's,Wants=.*,Wants=network-pre.target,g' ${LIB_NETD} > ${RUN_NETD}
+
+        # Start ssh early, after networkd and right before cloud-init-local
+        CI_CONFD="/run/systemd/generator.early/cloud-init-local.service.d"
+        mkdir -p $CI_CONFD
+        CI_EARLY_NETWORKD="${CI_CONFD}/10-start-networkd.conf"
+        cat >"${CI_EARLY_NETWORKD}" << EOF
+# generated by cloud-init-generator - ds_customizer
+[Unit]
+After=systemd-networkd.service
+EOF
+
+        CI_EARLY_SSH="${CI_CONFD}/20-start-ssh.conf"
+        cat >"${CI_EARLY_SSH}" << EOF
+# generated by cloud-init-generator - ds_customizer
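+# If ssh host keys already exist, start ssh and the login/session services
+# immediately, without blocking on their normal dependency ordering.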
+[Service]
+ExecStartPre=/bin/echo "20-start-ssh.conf"
+ExecStartPre=/bin/sh -c '[ -n "\$(ls /etc/ssh/ssh_host*)" ] && for svc in ssh.service systemd-user-sessions.service systemd-logind.service; do systemctl --job-mode=ignore-dependencies --no-block start \$svc; done; exit 0;'
+EOF
+    fi
+
+    # dhcp on all the e* nics for that old school cool with eni
+    if command -v ifup >/dev/null 2>&1; then
+        if [ -d /etc/network/interfaces.d ]; then
+            CI_ENI="/etc/network/10-cloud-init-dhcp.cfg"
+            cat >"${CI_ENI}" << EOF
+# This file is generated when cloud-init is enabled to bring up networking
+# as fast as possible.  Cloud-init will render the complete instance
+# network configuration later in boot.
+auto lo
+iface lo inet loopback
+
+EOF
+            for iface in $(ls -1 /sys/class/net/ | grep ^e); do
+                printf 'auto %s\niface %s inet dhcp\n\n' "$iface" "$iface" >> "$CI_ENI"
+            done
+        fi
+    fi
+}
+
+DS=${1};
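+# Only the listed datasources get the early-networking customization.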
+case $DS in
+   Azure|NoCloud|OpenStack|ConfigDrive)
+      early_networking
+   ;;
+   *) echo "No customizations for Datasource $DS";;
+esac
+exit 0

References