← Back to team overview

cloud-init-dev team mailing list archive

[Merge] ~daniel-thewatkins/cloud-init/+git/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

 

Dan Watkins has proposed merging ~daniel-thewatkins/cloud-init/+git/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.

Requested reviews:
  cloud-init commiters (cloud-init-dev)
Related bugs:
  Bug #1838794 in cloud-init: "Set Passwords documentation describes incorrect behaviour for `password` config key"
  https://bugs.launchpad.net/cloud-init/+bug/1838794

For more details, see:
https://code.launchpad.net/~daniel-thewatkins/cloud-init/+git/cloud-init/+merge/371135
-- 
Your team cloud-init commiters is requested to review the proposed merge of ~daniel-thewatkins/cloud-init/+git/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 22cb7fd..003ff1f 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -23,6 +23,7 @@ KNOWN_CLOUD_NAMES = [
     'CloudStack',
     'DigitalOcean',
     'GCE - Google Compute Engine',
+    'Exoscale',
     'Hetzner Cloud',
     'IBM - (aka SoftLayer or BlueMix)',
     'LXD',
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 4585e4d..cf9b5ab 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -9,27 +9,40 @@
 """
 Set Passwords
 -------------
-**Summary:** Set user passwords
-
-Set system passwords and enable or disable ssh password authentication.
-The ``chpasswd`` config key accepts a dictionary containing a single one of two
-keys, either ``expire`` or ``list``. If ``expire`` is specified and is set to
-``false``, then the ``password`` global config key is used as the password for
-all user accounts. If the ``expire`` key is specified and is set to ``true``
-then user passwords will be expired, preventing the default system passwords
-from being used.
-
-If the ``list`` key is provided, a list of
-``username:password`` pairs can be specified. The usernames specified
-must already exist on the system, or have been created using the
-``cc_users_groups`` module. A password can be randomly generated using
-``username:RANDOM`` or ``username:R``. A hashed password can be specified
-using ``username:$6$salt$hash``. Password ssh authentication can be
-enabled, disabled, or left to system defaults using ``ssh_pwauth``.
+**Summary:** Set user passwords and enable/disable SSH password authentication
+
+This module consumes three top-level config keys: ``ssh_pwauth``, ``chpasswd``
+and ``password``.
+
+The ``ssh_pwauth`` config key determines whether or not sshd will be configured
+to accept password authentication.  True values will enable password auth,
+false values will disable password auth, and the literal string ``unchanged``
+will leave it unchanged.  Setting no value will also leave the current setting
+on-disk unchanged.
+
+The ``chpasswd`` config key accepts a dictionary containing either or both of
+``expire`` and ``list``.
+
+If the ``list`` key is provided, it should contain a list of
+``username:password`` pairs.  This can be either a YAML list (of strings), or a
+multi-line string with one pair per line.  Each user will have the
+corresponding password set.  A password can be randomly generated by specifying
+``RANDOM`` or ``R`` as a user's password.  A hashed password, created by a tool
+like ``mkpasswd``, can be specified; a regex
+(``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value
+should be treated as a hash.
 
 .. note::
-    if using ``expire: true`` then a ssh authkey should be specified or it may
-    not be possible to login to the system
+    The users specified must already exist on the system.  Users will have been
+    created by the ``cc_users_groups`` module at this point.
+
+By default, all users on the system will have their passwords expired (meaning
+that they will have to be reset the next time the user logs in).  To disable
+this behaviour, set ``expire`` under ``chpasswd`` to a false value.
+
+If a ``list`` of user/password pairs is not specified under ``chpasswd``, then
+the value of the ``password`` config key will be used to set the default user's
+password.
 
 **Internal name:** ``cc_set_passwords``
 
@@ -160,6 +173,8 @@ def handle(_name, cfg, cloud, log, args):
         hashed_users = []
         randlist = []
         users = []
+        # N.B. This regex is included in the documentation (i.e. the module
+        # docstring), so any changes to it should be reflected there.
         prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')
         for line in plist:
             u, p = line.split(':', 1)
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index f8f7cb3..53f6939 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -91,6 +91,9 @@ public keys.
     ssh_authorized_keys:
         - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
         - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
+    ssh_publish_hostkeys:
+        enabled: <true/false> (Defaults to true)
+        blacklist: <list of key types> (Defaults to [dsa])
 """
 
 import glob
@@ -104,6 +107,10 @@ from cloudinit import util
 
 GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
 KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+PUBLISH_HOST_KEYS = True
+# Don't publish the dsa hostkey by default since OpenSSH recommends not using
+# it.
+HOST_KEY_PUBLISH_BLACKLIST = ['dsa']
 
 CONFIG_KEY_TO_FILE = {}
 PRIV_TO_PUB = {}
@@ -176,6 +183,23 @@ def handle(_name, cfg, cloud, log, _args):
                         util.logexc(log, "Failed generating key type %s to "
                                     "file %s", keytype, keyfile)
 
+    if "ssh_publish_hostkeys" in cfg:
+        host_key_blacklist = util.get_cfg_option_list(
+            cfg["ssh_publish_hostkeys"], "blacklist",
+            HOST_KEY_PUBLISH_BLACKLIST)
+        publish_hostkeys = util.get_cfg_option_bool(
+            cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
+    else:
+        host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
+        publish_hostkeys = PUBLISH_HOST_KEYS
+
+    if publish_hostkeys:
+        hostkeys = get_public_host_keys(blacklist=host_key_blacklist)
+        try:
+            cloud.datasource.publish_host_keys(hostkeys)
+        except Exception as e:
+            util.logexc(log, "Publishing host keys failed!")
+
     try:
         (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
         (user, _user_config) = ug_util.extract_default(users)
@@ -209,4 +233,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
 
     ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
 
+
+def get_public_host_keys(blacklist=None):
+    """Read host keys from /etc/ssh/*.pub files and return them as a list.
+
+    @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa']
+    @returns: List of keys, each formatted as a two-element tuple.
+        e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]
+    """
+    public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,)
+    key_list = []
+    blacklist_files = []
+    if blacklist:
+        # Convert blacklist to filenames:
+        # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub'
+        blacklist_files = [public_key_file_tmpl % (key_type,)
+                           for key_type in blacklist]
+    # Get list of public key files and filter out blacklisted files.
+    file_list = [hostfile for hostfile
+                 in glob.glob(public_key_file_tmpl % ('*',))
+                 if hostfile not in blacklist_files]
+
+    # Read host key files, retrieve first two fields as a tuple and
+    # append that tuple to key_list.
+    for file_name in file_list:
+        file_contents = util.load_file(file_name)
+        key_data = file_contents.split()
+        if key_data and len(key_data) > 1:
+            key_list.append(tuple(key_data[:2]))
+    return key_list
+
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
index c8a4271..e778984 100644
--- a/cloudinit/config/tests/test_ssh.py
+++ b/cloudinit/config/tests/test_ssh.py
@@ -1,5 +1,6 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
+import os.path
 
 from cloudinit.config import cc_ssh
 from cloudinit import ssh_util
@@ -12,6 +13,25 @@ MODPATH = "cloudinit.config.cc_ssh."
 class TestHandleSsh(CiTestCase):
     """Test cc_ssh handling of ssh config."""
 
+    def _publish_hostkey_test_setup(self):
+        self.test_hostkeys = {
+            'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'),
+            'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'),
+            'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'),
+            'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'),
+        }
+        self.test_hostkey_files = []
+        hostkey_tmpdir = self.tmp_dir()
+        for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']:
+            key_data = self.test_hostkeys[key_type]
+            filename = 'ssh_host_%s_key.pub' % key_type
+            filepath = os.path.join(hostkey_tmpdir, filename)
+            self.test_hostkey_files.append(filepath)
+            with open(filepath, 'w') as f:
+                f.write(' '.join(key_data))
+
+        cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key')
+
     def test_apply_credentials_with_user(self, m_setup_keys):
         """Apply keys for the given user and root."""
         keys = ["key1"]
@@ -64,6 +84,7 @@ class TestHandleSsh(CiTestCase):
         # Mock os.path.exits to True to short-circuit the key writing logic
         m_path_exists.return_value = True
         m_nug.return_value = ([], {})
+        cc_ssh.PUBLISH_HOST_KEYS = False
         cloud = self.tmp_cloud(
             distro='ubuntu', metadata={'public-keys': keys})
         cc_ssh.handle("name", cfg, cloud, None, None)
@@ -149,3 +170,148 @@ class TestHandleSsh(CiTestCase):
         self.assertEqual([mock.call(set(keys), user),
                           mock.call(set(keys), "root", options="")],
                          m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_publish_hostkeys_default(
+            self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle with various configs for ssh_publish_hostkeys."""
+        self._publish_hostkey_test_setup()
+        cc_ssh.PUBLISH_HOST_KEYS = True
+        keys = ["key1"]
+        user = "clouduser"
+        # Return no matching keys for first glob, test keys for second.
+        m_glob.side_effect = iter([
+                                  [],
+                                  self.test_hostkey_files,
+                                  ])
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.datasource.publish_host_keys = mock.Mock()
+
+        cfg = {}
+        expected_call = [self.test_hostkeys[key_type] for key_type
+                         in ['ecdsa', 'ed25519', 'rsa']]
+        cc_ssh.handle("name", cfg, cloud, None, None)
+        self.assertEqual([mock.call(expected_call)],
+                         cloud.datasource.publish_host_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_publish_hostkeys_config_enable(
+            self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle with various configs for ssh_publish_hostkeys."""
+        self._publish_hostkey_test_setup()
+        cc_ssh.PUBLISH_HOST_KEYS = False
+        keys = ["key1"]
+        user = "clouduser"
+        # Return no matching keys for first glob, test keys for second.
+        m_glob.side_effect = iter([
+                                  [],
+                                  self.test_hostkey_files,
+                                  ])
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.datasource.publish_host_keys = mock.Mock()
+
+        cfg = {'ssh_publish_hostkeys': {'enabled': True}}
+        expected_call = [self.test_hostkeys[key_type] for key_type
+                         in ['ecdsa', 'ed25519', 'rsa']]
+        cc_ssh.handle("name", cfg, cloud, None, None)
+        self.assertEqual([mock.call(expected_call)],
+                         cloud.datasource.publish_host_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_publish_hostkeys_config_disable(
+            self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle with various configs for ssh_publish_hostkeys."""
+        self._publish_hostkey_test_setup()
+        cc_ssh.PUBLISH_HOST_KEYS = True
+        keys = ["key1"]
+        user = "clouduser"
+        # Return no matching keys for first glob, test keys for second.
+        m_glob.side_effect = iter([
+                                  [],
+                                  self.test_hostkey_files,
+                                  ])
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.datasource.publish_host_keys = mock.Mock()
+
+        cfg = {'ssh_publish_hostkeys': {'enabled': False}}
+        cc_ssh.handle("name", cfg, cloud, None, None)
+        self.assertFalse(cloud.datasource.publish_host_keys.call_args_list)
+        cloud.datasource.publish_host_keys.assert_not_called()
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_publish_hostkeys_config_blacklist(
+            self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle with various configs for ssh_publish_hostkeys."""
+        self._publish_hostkey_test_setup()
+        cc_ssh.PUBLISH_HOST_KEYS = True
+        keys = ["key1"]
+        user = "clouduser"
+        # Return no matching keys for first glob, test keys for second.
+        m_glob.side_effect = iter([
+                                  [],
+                                  self.test_hostkey_files,
+                                  ])
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.datasource.publish_host_keys = mock.Mock()
+
+        cfg = {'ssh_publish_hostkeys': {'enabled': True,
+                                        'blacklist': ['dsa', 'rsa']}}
+        expected_call = [self.test_hostkeys[key_type] for key_type
+                         in ['ecdsa', 'ed25519']]
+        cc_ssh.handle("name", cfg, cloud, None, None)
+        self.assertEqual([mock.call(expected_call)],
+                         cloud.datasource.publish_host_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_publish_hostkeys_empty_blacklist(
+            self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle with various configs for ssh_publish_hostkeys."""
+        self._publish_hostkey_test_setup()
+        cc_ssh.PUBLISH_HOST_KEYS = True
+        keys = ["key1"]
+        user = "clouduser"
+        # Return no matching keys for first glob, test keys for second.
+        m_glob.side_effect = iter([
+                                  [],
+                                  self.test_hostkey_files,
+                                  ])
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.datasource.publish_host_keys = mock.Mock()
+
+        cfg = {'ssh_publish_hostkeys': {'enabled': True,
+                                        'blacklist': []}}
+        expected_call = [self.test_hostkeys[key_type] for key_type
+                         in ['dsa', 'ecdsa', 'ed25519', 'rsa']]
+        cc_ssh.handle("name", cfg, cloud, None, None)
+        self.assertEqual([mock.call(expected_call)],
+                         cloud.datasource.publish_host_keys.call_args_list)
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index b1ebaad..2060d81 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -39,6 +39,7 @@ CFG_BUILTIN = {
         'Hetzner',
         'IBMCloud',
         'Oracle',
+        'Exoscale',
         # At the end to act as a 'catch' when none of the above work...
         'None',
     ],
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
new file mode 100644
index 0000000..52e7f6f
--- /dev/null
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -0,0 +1,258 @@
+# Author: Mathieu Corbin <mathieu.corbin@xxxxxxxxxxxx>
+# Author: Christopher Glass <christopher.glass@xxxxxxxxxxxx>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import ec2_utils as ec2
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+METADATA_URL = "http://169.254.169.254"
+API_VERSION = "1.0"
+PASSWORD_SERVER_PORT = 8080
+
+URL_TIMEOUT = 10
+URL_RETRIES = 6
+
+EXOSCALE_DMI_NAME = "Exoscale"
+
+BUILTIN_DS_CONFIG = {
+    # We run the set password config module on every boot in order to enable
+    # resetting the instance's password via the exoscale console (and a
+    # subsequent instance reboot).
+    'cloud_config_modules': [["set-passwords", "always"]]
+}
+
+
+class DataSourceExoscale(sources.DataSource):
+
+    dsname = 'Exoscale'
+
+    def __init__(self, sys_cfg, distro, paths):
+        super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths)
+        LOG.debug("Initializing the Exoscale datasource")
+
+        self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL)
+        self.api_version = self.ds_cfg.get('api_version', API_VERSION)
+        self.password_server_port = int(
+            self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT))
+        self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT)
+        self.url_retries = self.ds_cfg.get('retries', URL_RETRIES)
+
+        self.extra_config = BUILTIN_DS_CONFIG
+
+    def wait_for_metadata_service(self):
+        """Wait for the metadata service to be reachable."""
+
+        metadata_url = "{}/{}/meta-data/instance-id".format(
+            self.metadata_url, self.api_version)
+
+        url = url_helper.wait_for_url(
+            urls=[metadata_url],
+            max_wait=self.url_max_wait,
+            timeout=self.url_timeout,
+            status_cb=LOG.critical)
+
+        return bool(url)
+
+    def crawl_metadata(self):
+        """
+        Crawl the metadata service when available.
+
+        @returns: Dictionary of crawled metadata content.
+        """
+        metadata_ready = util.log_time(
+            logfunc=LOG.info,
+            msg='waiting for the metadata service',
+            func=self.wait_for_metadata_service)
+
+        if not metadata_ready:
+            return {}
+
+        return read_metadata(self.metadata_url, self.api_version,
+                             self.password_server_port, self.url_timeout,
+                             self.url_retries)
+
+    def _get_data(self):
+        """Fetch the user data, the metadata and the VM password
+        from the metadata service.
+
+        Please refer to the datasource documentation for details on how the
+        metadata server and password server are crawled.
+        """
+        if not self._is_platform_viable():
+            return False
+
+        data = util.log_time(
+            logfunc=LOG.debug,
+            msg='Crawl of metadata service',
+            func=self.crawl_metadata)
+
+        if not data:
+            return False
+
+        self.userdata_raw = data['user-data']
+        self.metadata = data['meta-data']
+        password = data.get('password')
+
+        password_config = {}
+        if password:
+            # Since we have a password, let's make sure we are allowed to use
+            # it by allowing ssh_pwauth.
+            # The password module's default behavior is to leave the
+            # configuration as-is in this regard, so that means it will either
+            # leave the password always disabled if no password is ever set, or
+            # leave the password login enabled if we set it once.
+            password_config = {
+                'ssh_pwauth': True,
+                'password': password,
+                'chpasswd': {
+                    'expire': False,
+                },
+            }
+
+        # builtin extra_config overrides password_config
+        self.extra_config = util.mergemanydict(
+            [self.extra_config, password_config])
+
+        return True
+
+    def get_config_obj(self):
+        return self.extra_config
+
+    def _is_platform_viable(self):
+        return util.read_dmi_data('system-product-name').startswith(
+            EXOSCALE_DMI_NAME)
+
+
+# Used to match classes to dependencies
+datasources = [
+    (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
+
+
+def get_password(metadata_url=METADATA_URL,
+                 api_version=API_VERSION,
+                 password_server_port=PASSWORD_SERVER_PORT,
+                 url_timeout=URL_TIMEOUT,
+                 url_retries=URL_RETRIES):
+    """Obtain the VM's password if set.
+
+    Once fetched the password is marked saved. Future calls to this method may
+    return empty string or 'saved_password'."""
+    password_url = "{}:{}/{}/".format(metadata_url, password_server_port,
+                                      api_version)
+    response = url_helper.read_file_or_url(
+        password_url,
+        ssl_details=None,
+        headers={"DomU_Request": "send_my_password"},
+        timeout=url_timeout,
+        retries=url_retries)
+    password = response.contents.decode('utf-8')
+    # the password is empty or already saved
+    # Note: the original metadata server would answer an additional
+    # 'bad_request' status, but the Exoscale implementation does not.
+    if password in ['', 'saved_password']:
+        return None
+    # save the password
+    url_helper.read_file_or_url(
+        password_url,
+        ssl_details=None,
+        headers={"DomU_Request": "saved_password"},
+        timeout=url_timeout,
+        retries=url_retries)
+    return password
+
+
+def read_metadata(metadata_url=METADATA_URL,
+                  api_version=API_VERSION,
+                  password_server_port=PASSWORD_SERVER_PORT,
+                  url_timeout=URL_TIMEOUT,
+                  url_retries=URL_RETRIES):
+    """Query the metadata server and return the retrieved data."""
+    crawled_metadata = {}
+    crawled_metadata['_metadata_api_version'] = api_version
+    try:
+        crawled_metadata['user-data'] = ec2.get_instance_userdata(
+            api_version,
+            metadata_url,
+            timeout=url_timeout,
+            retries=url_retries)
+        crawled_metadata['meta-data'] = ec2.get_instance_metadata(
+            api_version,
+            metadata_url,
+            timeout=url_timeout,
+            retries=url_retries)
+    except Exception as e:
+        util.logexc(LOG, "failed reading from metadata url %s (%s)",
+                    metadata_url, e)
+        return {}
+
+    try:
+        crawled_metadata['password'] = get_password(
+            api_version=api_version,
+            metadata_url=metadata_url,
+            password_server_port=password_server_port,
+            url_retries=url_retries,
+            url_timeout=url_timeout)
+    except Exception as e:
+        util.logexc(LOG, "failed to read from password server url %s:%s (%s)",
+                    metadata_url, password_server_port, e)
+
+    return crawled_metadata
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(description='Query Exoscale Metadata')
+    parser.add_argument(
+        "--endpoint",
+        metavar="URL",
+        help="The url of the metadata service.",
+        default=METADATA_URL)
+    parser.add_argument(
+        "--version",
+        metavar="VERSION",
+        help="The version of the metadata endpoint to query.",
+        default=API_VERSION)
+    parser.add_argument(
+        "--retries",
+        metavar="NUM",
+        type=int,
+        help="The number of retries querying the endpoint.",
+        default=URL_RETRIES)
+    parser.add_argument(
+        "--timeout",
+        metavar="NUM",
+        type=int,
+        help="The time in seconds to wait before timing out.",
+        default=URL_TIMEOUT)
+    parser.add_argument(
+        "--password-port",
+        metavar="PORT",
+        type=int,
+        help="The port on which the password endpoint listens",
+        default=PASSWORD_SERVER_PORT)
+
+    args = parser.parse_args()
+
+    data = read_metadata(
+        metadata_url=args.endpoint,
+        api_version=args.version,
+        password_server_port=args.password_port,
+        url_timeout=args.timeout,
+        url_retries=args.retries)
+
+    print(util.json_dumps(data))
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index d816262..6cbfbba 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -18,10 +18,13 @@ LOG = logging.getLogger(__name__)
 MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
 BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
 REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
+                        'v1/instance/guest-attributes')
+HOSTKEY_NAMESPACE = 'hostkeys'
+HEADERS = {'Metadata-Flavor': 'Google'}
 
 
 class GoogleMetadataFetcher(object):
-    headers = {'Metadata-Flavor': 'Google'}
 
     def __init__(self, metadata_address):
         self.metadata_address = metadata_address
@@ -32,7 +35,7 @@ class GoogleMetadataFetcher(object):
             url = self.metadata_address + path
             if is_recursive:
                 url += '/?recursive=True'
-            resp = url_helper.readurl(url=url, headers=self.headers)
+            resp = url_helper.readurl(url=url, headers=HEADERS)
         except url_helper.UrlError as exc:
             msg = "url %s raised exception %s"
             LOG.debug(msg, path, exc)
@@ -90,6 +93,10 @@ class DataSourceGCE(sources.DataSource):
         public_keys_data = self.metadata['public-keys-data']
         return _parse_public_keys(public_keys_data, self.default_user)
 
+    def publish_host_keys(self, hostkeys):
+        for key in hostkeys:
+            _write_host_key_to_guest_attributes(*key)
+
     def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
         # GCE has long FDQN's and has asked for short hostnames.
         return self.metadata['local-hostname'].split('.')[0]
@@ -103,6 +110,17 @@ class DataSourceGCE(sources.DataSource):
         return self.availability_zone.rsplit('-', 1)[0]
 
 
+def _write_host_key_to_guest_attributes(key_type, key_value):
+    url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
+    key_value = key_value.encode('utf-8')
+    resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
+                              request_method='PUT', check_status=False)
+    if resp.ok():
+        LOG.debug('Wrote %s host key to guest attributes.',  key_type)
+    else:
+        LOG.debug('Unable to write %s host key to guest attributes.', key_type)
+
+
 def _has_expired(public_key):
     # Check whether an SSH key is expired. Public key input is a single SSH
     # public key in the GCE specific key format documented here:
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index c2baccd..a319322 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -491,6 +491,16 @@ class DataSource(object):
     def get_public_ssh_keys(self):
         return normalize_pubkey_data(self.metadata.get('public-keys'))
 
+    def publish_host_keys(self, hostkeys):
+        """Publish the public SSH host keys (found in /etc/ssh/*.pub).
+
+        @param hostkeys: List of host key tuples (key_type, key_value),
+            where key_type is the first field in the public key file
+            (e.g. 'ssh-rsa') and key_value is the key itself
+            (e.g. 'AAAAB3NzaC1y...').
+        """
+        pass
+
     def _remap_device(self, short_name):
         # LP: #611137
         # the metadata service may believe that devices are named 'sda'
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 0af0d9e..44ee61d 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -199,18 +199,19 @@ def _get_ssl_args(url, ssl_details):
 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
             check_status=True, allow_redirects=True, exception_cb=None,
-            session=None, infinite=False, log_req_resp=True):
+            session=None, infinite=False, log_req_resp=True,
+            request_method=None):
     url = _cleanurl(url)
     req_args = {
         'url': url,
     }
     req_args.update(_get_ssl_args(url, ssl_details))
     req_args['allow_redirects'] = allow_redirects
-    req_args['method'] = 'GET'
+    if not request_method:
+        request_method = 'POST' if data else 'GET'
+    req_args['method'] = request_method
     if timeout is not None:
         req_args['timeout'] = max(float(timeout), 0)
-    if data:
-        req_args['method'] = 'POST'
     # It doesn't seem like config
     # was added in older library versions (or newer ones either), thus we
     # need to manually do the retries if it wasn't...
diff --git a/debian/changelog b/debian/changelog
index 671dad7..2cda24c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,15 @@
+cloud-init (19.2-9-g15584720-0ubuntu1) eoan; urgency=medium
+
+  * New upstream snapshot.
+    - Add support for publishing host keys to GCE guest attributes
+      [Rick Wright]
+    - New data source for the Exoscale.com cloud platform [Chris Glass]
+    - doc: remove intersphinx extension
+    - cc_set_passwords: rewrite documentation (LP: #1838794)
+  * d/cloud-init.templates: add Exoscale data source
+
+ -- Daniel Watkins <oddbloke@xxxxxxxxxx>  Fri, 09 Aug 2019 13:57:28 -0400
+
 cloud-init (19.2-5-g496aaa94-0ubuntu1) eoan; urgency=medium
 
   * New upstream snapshot.
diff --git a/debian/cloud-init.templates b/debian/cloud-init.templates
index ece53a0..e5efdad 100644
--- a/debian/cloud-init.templates
+++ b/debian/cloud-init.templates
@@ -1,8 +1,8 @@
 Template: cloud-init/datasources
 Type: multiselect
-Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, None
-Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, None
-Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com,  SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, None: Failsafe datasource
+Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, None
+Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, None
+Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com,  SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, None: Failsafe datasource
 Description: Which data sources should be searched?
  Cloud-init supports searching different "Data Sources" for information
  that it uses to configure a cloud instance.
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 50eb05c..4174477 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -27,16 +27,11 @@ project = 'Cloud-Init'
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
-    'sphinx.ext.intersphinx',
     'sphinx.ext.autodoc',
     'sphinx.ext.autosectionlabel',
     'sphinx.ext.viewcode',
 ]
 
-intersphinx_mapping = {
-    'sphinx': ('http://sphinx.pocoo.org', None)
-}
-
 # The suffix of source filenames.
 source_suffix = '.rst'
 
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 648c606..2148cd5 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -155,6 +155,7 @@ Follow for more information.
    datasources/configdrive.rst
    datasources/digitalocean.rst
    datasources/ec2.rst
+   datasources/exoscale.rst
    datasources/maas.rst
    datasources/nocloud.rst
    datasources/opennebula.rst
diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst
new file mode 100644
index 0000000..27aec9c
--- /dev/null
+++ b/doc/rtd/topics/datasources/exoscale.rst
@@ -0,0 +1,68 @@
+.. _datasource_exoscale:
+
+Exoscale
+========
+
+This datasource supports reading from the metadata server used on the
+`Exoscale platform <https://exoscale.com>`_.
+
+Use of the Exoscale datasource is recommended to benefit from new features of
+the Exoscale platform.
+
+The datasource relies on the availability of a compatible metadata server
+(``http://169.254.169.254`` is used by default) and its companion password
+server, reachable at the same address (by default on port 8080).
+
+Crawling of metadata
+--------------------
+
+The metadata service and password server are crawled slightly differently:
+
+ * The "metadata service" is crawled every boot.
+ * The password server is also crawled every boot (the Exoscale datasource
+   forces the password module to run with "frequency always").
+
+In the password server case, the following rules apply in order to enable the
+"restore instance password" functionality:
+
+ * If a password is returned by the password server, it is then marked "saved"
+   by the cloud-init datasource. Subsequent boots will skip setting the password
+   (the password server will return "saved_password").
+ * When the instance password is reset (via the Exoscale UI), the password
+   server will return the non-empty password at next boot, therefore causing
+   cloud-init to reset the instance's password.
+
+Configuration
+-------------
+
+Users of this datasource are discouraged from changing the default settings
+unless instructed to by Exoscale support.
+
+The following settings are available and can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg.d/`).
+
+The settings available are:
+
+ * **metadata_url**: The URL for the metadata service (defaults to
+   ``http://169.254.169.254``)
+ * **api_version**: The API version path on which to query the instance metadata
+   (defaults to ``1.0``)
+ * **password_server_port**: The port (on the metadata server) on which the
+   password server listens (defaults to ``8080``).
+ * **timeout**: The timeout value provided to urlopen for each individual http
+   request. (defaults to ``10``)
+ * **retries**: The number of retries that should be done for an http request
+   (defaults to ``6``)
+
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+   datasource:
+     Exoscale:
+       metadata_url: "http://169.254.169.254"
+       api_version: "1.0"
+       password_server_port: 8080
+       timeout: 10
+       retries: 6
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 2a9cfb2..61a7a76 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -13,6 +13,7 @@ from cloudinit.sources import (
     DataSourceConfigDrive as ConfigDrive,
     DataSourceDigitalOcean as DigitalOcean,
     DataSourceEc2 as Ec2,
+    DataSourceExoscale as Exoscale,
     DataSourceGCE as GCE,
     DataSourceHetzner as Hetzner,
     DataSourceIBMCloud as IBMCloud,
@@ -53,6 +54,7 @@ DEFAULT_NETWORK = [
     CloudStack.DataSourceCloudStack,
     DSNone.DataSourceNone,
     Ec2.DataSourceEc2,
+    Exoscale.DataSourceExoscale,
     GCE.DataSourceGCE,
     MAAS.DataSourceMAAS,
     NoCloud.DataSourceNoCloudNet,
diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py
new file mode 100644
index 0000000..350c330
--- /dev/null
+++ b/tests/unittests/test_datasource/test_exoscale.py
@@ -0,0 +1,203 @@
+# Author: Mathieu Corbin <mathieu.corbin@xxxxxxxxxxxx>
+# Author: Christopher Glass <christopher.glass@xxxxxxxxxxxx>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+from cloudinit import helpers
+from cloudinit.sources.DataSourceExoscale import (
+    API_VERSION,
+    DataSourceExoscale,
+    METADATA_URL,
+    get_password,
+    PASSWORD_SERVER_PORT,
+    read_metadata)
+from cloudinit.tests.helpers import HttprettyTestCase, mock
+
+import httpretty
+import requests
+
+
+TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL,
+                                       PASSWORD_SERVER_PORT,
+                                       API_VERSION)
+
+TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL,
+                                              API_VERSION)
+
+TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL,
+                                             API_VERSION)
+
+
+@httpretty.activate
+class TestDatasourceExoscale(HttprettyTestCase):
+
+    def setUp(self):
+        super(TestDatasourceExoscale, self).setUp()
+        self.tmp = self.tmp_dir()
+        self.password_url = TEST_PASSWORD_URL
+        self.metadata_url = TEST_METADATA_URL
+        self.userdata_url = TEST_USERDATA_URL
+
+    def test_password_saved(self):
+        """The password is not set when it is not found
+        in the metadata service."""
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body="saved_password")
+        self.assertFalse(get_password())
+
+    def test_password_empty(self):
+        """No password is set if the metadata service returns
+        an empty string."""
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body="")
+        self.assertFalse(get_password())
+
+    def test_password(self):
+        """The password is set to what is found in the metadata
+        service."""
+        expected_password = "p@ssw0rd"
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body=expected_password)
+        password = get_password()
+        self.assertEqual(expected_password, password)
+
+    def test_get_data(self):
+        """The datasource conforms to expected behavior when supplied
+        full test data."""
+        path = helpers.Paths({'run_dir': self.tmp})
+        ds = DataSourceExoscale({}, None, path)
+        ds._is_platform_viable = lambda: True
+        expected_password = "p@ssw0rd"
+        expected_id = "12345"
+        expected_hostname = "myname"
+        expected_userdata = "#cloud-config"
+        httpretty.register_uri(httpretty.GET,
+                               self.userdata_url,
+                               body=expected_userdata)
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body=expected_password)
+        httpretty.register_uri(httpretty.GET,
+                               self.metadata_url,
+                               body="instance-id\nlocal-hostname")
+        httpretty.register_uri(httpretty.GET,
+                               "{}local-hostname".format(self.metadata_url),
+                               body=expected_hostname)
+        httpretty.register_uri(httpretty.GET,
+                               "{}instance-id".format(self.metadata_url),
+                               body=expected_id)
+        self.assertTrue(ds._get_data())
+        self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+        self.assertEqual(ds.metadata, {"instance-id": expected_id,
+                                       "local-hostname": expected_hostname})
+        self.assertEqual(ds.get_config_obj(),
+                         {'ssh_pwauth': True,
+                          'password': expected_password,
+                          'cloud_config_modules': [
+                              ["set-passwords", "always"]],
+                          'chpasswd': {
+                              'expire': False,
+                          }})
+
+    def test_get_data_saved_password(self):
+        """The datasource conforms to expected behavior when saved_password is
+        returned by the password server."""
+        path = helpers.Paths({'run_dir': self.tmp})
+        ds = DataSourceExoscale({}, None, path)
+        ds._is_platform_viable = lambda: True
+        expected_answer = "saved_password"
+        expected_id = "12345"
+        expected_hostname = "myname"
+        expected_userdata = "#cloud-config"
+        httpretty.register_uri(httpretty.GET,
+                               self.userdata_url,
+                               body=expected_userdata)
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body=expected_answer)
+        httpretty.register_uri(httpretty.GET,
+                               self.metadata_url,
+                               body="instance-id\nlocal-hostname")
+        httpretty.register_uri(httpretty.GET,
+                               "{}local-hostname".format(self.metadata_url),
+                               body=expected_hostname)
+        httpretty.register_uri(httpretty.GET,
+                               "{}instance-id".format(self.metadata_url),
+                               body=expected_id)
+        self.assertTrue(ds._get_data())
+        self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+        self.assertEqual(ds.metadata, {"instance-id": expected_id,
+                                       "local-hostname": expected_hostname})
+        self.assertEqual(ds.get_config_obj(),
+                         {'cloud_config_modules': [
+                             ["set-passwords", "always"]]})
+
+    def test_get_data_no_password(self):
+        """The datasource conforms to expected behavior when no password is
+        returned by the password server."""
+        path = helpers.Paths({'run_dir': self.tmp})
+        ds = DataSourceExoscale({}, None, path)
+        ds._is_platform_viable = lambda: True
+        expected_answer = ""
+        expected_id = "12345"
+        expected_hostname = "myname"
+        expected_userdata = "#cloud-config"
+        httpretty.register_uri(httpretty.GET,
+                               self.userdata_url,
+                               body=expected_userdata)
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body=expected_answer)
+        httpretty.register_uri(httpretty.GET,
+                               self.metadata_url,
+                               body="instance-id\nlocal-hostname")
+        httpretty.register_uri(httpretty.GET,
+                               "{}local-hostname".format(self.metadata_url),
+                               body=expected_hostname)
+        httpretty.register_uri(httpretty.GET,
+                               "{}instance-id".format(self.metadata_url),
+                               body=expected_id)
+        self.assertTrue(ds._get_data())
+        self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+        self.assertEqual(ds.metadata, {"instance-id": expected_id,
+                                       "local-hostname": expected_hostname})
+        self.assertEqual(ds.get_config_obj(),
+                         {'cloud_config_modules': [
+                             ["set-passwords", "always"]]})
+
+    @mock.patch('cloudinit.sources.DataSourceExoscale.get_password')
+    def test_read_metadata_when_password_server_unreachable(self, m_password):
+        """The read_metadata function returns partial results in case the
+        password server (only) is unreachable."""
+        expected_id = "12345"
+        expected_hostname = "myname"
+        expected_userdata = "#cloud-config"
+
+        m_password.side_effect = requests.Timeout('Fake Connection Timeout')
+        httpretty.register_uri(httpretty.GET,
+                               self.userdata_url,
+                               body=expected_userdata)
+        httpretty.register_uri(httpretty.GET,
+                               self.metadata_url,
+                               body="instance-id\nlocal-hostname")
+        httpretty.register_uri(httpretty.GET,
+                               "{}local-hostname".format(self.metadata_url),
+                               body=expected_hostname)
+        httpretty.register_uri(httpretty.GET,
+                               "{}instance-id".format(self.metadata_url),
+                               body=expected_id)
+
+        result = read_metadata()
+
+        self.assertIsNone(result.get("password"))
+        self.assertEqual(result.get("user-data").decode("utf-8"),
+                         expected_userdata)
+
+    def test_non_viable_platform(self):
+        """The datasource fails fast when the platform is not viable."""
+        path = helpers.Paths({'run_dir': self.tmp})
+        ds = DataSourceExoscale({}, None, path)
+        ds._is_platform_viable = lambda: False
+        self.assertFalse(ds._get_data())
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 41176c6..67744d3 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -55,6 +55,8 @@ GCE_USER_DATA_TEXT = {
 HEADERS = {'Metadata-Flavor': 'Google'}
 MD_URL_RE = re.compile(
     r'http://metadata.google.internal/computeMetadata/v1/.*')
+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
+                        'v1/instance/guest-attributes/hostkeys/')
 
 
 def _set_mock_metadata(gce_meta=None):
@@ -341,4 +343,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
             public_key_data, default_user='default')
         self.assertEqual(sorted(found), sorted(expected))
 
+    @mock.patch("cloudinit.url_helper.readurl")
+    def test_publish_host_keys(self, m_readurl):
+        hostkeys = [('ssh-rsa', 'asdfasdf'),
+                    ('ssh-ed25519', 'qwerqwer')]
+        readurl_expected_calls = [
+            mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS,
+                      request_method='PUT',
+                      url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')),
+            mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS,
+                      request_method='PUT',
+                      url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')),
+        ]
+        self.ds.publish_host_keys(hostkeys)
+        m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
+
+
 # vi: ts=4 expandtab
diff --git a/tools/ds-identify b/tools/ds-identify
index 0305e36..e0d4865 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -124,7 +124,7 @@ DI_DSNAME=""
 # be searched if there is no setting found in config.
 DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
 CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud Oracle"
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale"
 DI_DSLIST=""
 DI_MODE=""
 DI_ON_FOUND=""
@@ -553,6 +553,11 @@ dscheck_CloudStack() {
     return $DS_NOT_FOUND
 }
 
+dscheck_Exoscale() {
+    dmi_product_name_matches "Exoscale*" && return $DS_FOUND
+    return $DS_NOT_FOUND
+}
+
 dscheck_CloudSigma() {
     # http://paste.ubuntu.com/23624795/
     dmi_product_name_matches "CloudSigma" && return $DS_FOUND