
cloud-init-dev team mailing list archive

[Merge] ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

 

Chad Smith has proposed merging ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.

Commit message:
Upstream snapshot of cloud-init tip for release into Cosmic.

Contains:
 - full package version in logs
 - util: add get_linux_distro function to replace platform.dist
   [Robert Schweikert] (LP: #1745235)
 - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
 - Do not use the systemd_prefix macro, not available in this environment
   [Robert Schweikert]
 - doc: Add config info to ec2, openstack and cloudstack datasource docs
 - Enable SmartOS network metadata to work with netplan via per-subnet
   routes [Dan McDonald] (LP: #1763512)
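
The last item above (per-subnet routes for SmartOS) is the behaviour exercised by the
DataSourceSmartOS changes in the diff below. As a rough, hypothetical sketch of the mapping
only (not the datasource's exact code path): each sdc:routes entry keeps its 'dst' and
'gateway' values, with 'dst' renamed to 'network', and is attached to every static subnet,
while 'linklocal' is ignored.

    # Hypothetical sample data; illustrates the sdc:routes -> network_config
    # mapping added by convert_smartos_network_data() in the diff below.
    sdc_routes = [
        {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'},
    ]
    subnet = {'type': 'static', 'address': '8.12.42.26/24'}
    routeents = []
    for route in sdc_routes:
        # Only 'dst' and 'gateway' carry over; 'dst' becomes 'network'.
        routeents.append({'network': route['dst'], 'gateway': route['gateway']})
    subnet['routes'] = routeents
    # netplan can then render these as per-subnet routes.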


Requested reviews:
  cloud-init commiters (cloud-init-dev)
Related bugs:
  Bug #1745235 in cloud-init: "distribution detection"
  https://bugs.launchpad.net/cloud-init/+bug/1745235
  Bug #1763512 in cloud-init: "DataSourceSmartOS ignores sdc:routes"
  https://bugs.launchpad.net/cloud-init/+bug/1763512

For more details, see:
https://code.launchpad.net/~chad.smith/cloud-init/+git/cloud-init/+merge/347396
-- 
Your team cloud-init commiters is requested to review the proposed merge of ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.
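
Before the diff itself, a minimal usage sketch of the new util.get_linux_distro() helper added
below (assuming the cloudinit tree from this branch is importable): it parses /etc/os-release
into a (name, version, machine) tuple and falls back to the deprecated platform.dist() when
that file is missing.

    # Minimal sketch, not part of this merge; exercises the helper added below.
    from cloudinit import util

    distro_name, distro_version, machine = util.get_linux_distro()
    print(distro_name, distro_version, machine)  # e.g. ubuntu 16.04 x86_64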
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index eca6ea3..339baba 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -233,7 +233,7 @@ def setup_swapfile(fname, size=None, maxsize=None):
     if str(size).lower() == "auto":
         try:
             memsize = util.read_meminfo()['total']
-        except IOError as e:
+        except IOError:
             LOG.debug("Not creating swap: failed to read meminfo")
             return
 
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 1ca92d4..dc33876 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -97,7 +97,7 @@ def _has_suitable_upstart():
             else:
                 util.logexc(LOG, "dpkg --compare-versions failed [%s]",
                             e.exit_code)
-        except Exception as e:
+        except Exception:
             util.logexc(LOG, "dpkg --compare-versions failed")
         return False
     else:
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index f6e86f3..24fd65f 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -184,11 +184,11 @@ class DataSourceAltCloud(sources.DataSource):
             cmd = CMD_PROBE_FLOPPY
             (cmd_out, _err) = util.subp(cmd)
             LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
-        except ProcessExecutionError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except ProcessExecutionError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
-        except OSError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except OSError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
 
         floppy_dev = '/dev/fd0'
@@ -197,11 +197,11 @@ class DataSourceAltCloud(sources.DataSource):
         try:
             (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
             LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
-        except ProcessExecutionError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except ProcessExecutionError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
-        except OSError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+        except OSError as e:
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
             return False
 
         try:
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 5d3a8dd..2daea59 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource):
                 LOG.debug("Using seeded data from %s", path)
                 mydata = _merge_new_seed(mydata, seeded)
                 break
-            except ValueError as e:
+            except ValueError:
                 pass
 
         # If the datasource config had a 'seedfrom' entry, then that takes
@@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource):
                     try:
                         seeded = util.mount_cb(dev, _pp2d_callback,
                                                pp2d_kwargs)
-                    except ValueError as e:
+                    except ValueError:
                         if dev in label_list:
                             LOG.warning("device %s with label=%s not a"
                                         "valid seed.", dev, label)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index d4a4111..16c1078 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None):
         if asuser is not None:
             try:
                 pwd.getpwnam(asuser)
-            except KeyError as e:
+            except KeyError:
                 raise BrokenContextDiskDir(
                     "configured user '{user}' does not exist".format(
                         user=asuser))
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index c91e4d5..f92e8b5 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -17,7 +17,7 @@
 #        of a serial console.
 #
 #   Certain behavior is defined by the DataDictionary
-#       http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
+#       https://eng.joyent.com/mdata/datadict.html
 #       Comments with "@datadictionary" are snippets of the definition
 
 import base64
@@ -298,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource):
         self.userdata_raw = ud
         self.vendordata_raw = md['vendor-data']
         self.network_data = md['network-data']
+        self.routes_data = md['routes']
 
         self._set_provisioned()
         return True
@@ -321,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource):
                     convert_smartos_network_data(
                         network_data=self.network_data,
                         dns_servers=self.metadata['dns_servers'],
-                        dns_domain=self.metadata['dns_domain']))
+                        dns_domain=self.metadata['dns_domain'],
+                        routes=self.routes_data))
         return self._network_config
 
 
@@ -760,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None):
 
 # Convert SMARTOS 'sdc:nics' data to network_config yaml
 def convert_smartos_network_data(network_data=None,
-                                 dns_servers=None, dns_domain=None):
+                                 dns_servers=None, dns_domain=None,
+                                 routes=None):
     """Return a dictionary of network_config by parsing provided
        SMARTOS sdc:nics configuration data
 
@@ -778,6 +781,10 @@ def convert_smartos_network_data(network_data=None,
     keys are related to ip configuration.  For each ip in the 'ips' list
     we create a subnet entry under 'subnets' pairing the ip to a one in
     the 'gateways' list.
+
+    Each route in sdc:routes is mapped to a route on each interface.
+    The sdc:routes properties 'dst' and 'gateway' map to 'network' and
+    'gateway'.  The 'linklocal' sdc:routes property is ignored.
     """
 
     valid_keys = {
@@ -800,6 +807,10 @@ def convert_smartos_network_data(network_data=None,
             'scope',
             'type',
         ],
+        'route': [
+            'network',
+            'gateway',
+        ],
     }
 
     if dns_servers:
@@ -814,6 +825,9 @@ def convert_smartos_network_data(network_data=None,
     else:
         dns_domain = []
 
+    if not routes:
+        routes = []
+
     def is_valid_ipv4(addr):
         return '.' in addr
 
@@ -840,6 +854,7 @@ def convert_smartos_network_data(network_data=None,
             if ip == "dhcp":
                 subnet = {'type': 'dhcp4'}
             else:
+                routeents = []
                 subnet = dict((k, v) for k, v in nic.items()
                               if k in valid_keys['subnet'])
                 subnet.update({
@@ -861,6 +876,25 @@ def convert_smartos_network_data(network_data=None,
                             pgws[proto]['gw'] = gateways[0]
                             subnet.update({'gateway': pgws[proto]['gw']})
 
+                for route in routes:
+                    rcfg = dict((k, v) for k, v in route.items()
+                                if k in valid_keys['route'])
+                    # Linux uses the value of 'gateway' to determine
+                    # automatically if the route is a forward/next-hop
+                    # (non-local IP for gateway) or an interface/resolver
+                    # (local IP for gateway).  So we can ignore the
+                    # 'interface' attribute of sdc:routes, because SDC
+                    # guarantees that the gateway is a local IP for
+                    # "interface=true".
+                    #
+                    # Eventually we should be smart and compare "gateway"
+                    # to see if it's in the prefix.  We can then smartly
+                    # add or not-add this route.  But for now,
+                    # when in doubt, use brute force! Routes for everyone!
+                    rcfg.update({'network': route['dst']})
+                    routeents.append(rcfg)
+                    subnet.update({'routes': routeents})
+
             subnets.append(subnet)
         cfg.update({'subnets': subnets})
         config.append(cfg)
@@ -904,12 +938,14 @@ if __name__ == "__main__":
             keyname = SMARTOS_ATTRIB_JSON[key]
             data[key] = client.get_json(keyname)
         elif key == "network_config":
-            for depkey in ('network-data', 'dns_servers', 'dns_domain'):
+            for depkey in ('network-data', 'dns_servers', 'dns_domain',
+                           'routes'):
                 load_key(client, depkey, data)
             data[key] = convert_smartos_network_data(
                 network_data=data['network-data'],
                 dns_servers=data['dns_servers'],
-                dns_domain=data['dns_domain'])
+                dns_domain=data['dns_domain'],
+                routes=data['routes'])
         else:
             if key in SMARTOS_ATTRIB_MAP:
                 keyname, strip = SMARTOS_ATTRIB_MAP[key]
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 3c05a43..17853fc 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -3,11 +3,12 @@
 """Tests for cloudinit.util"""
 
 import logging
-from textwrap import dedent
+import platform
 
 import cloudinit.util as util
 
 from cloudinit.tests.helpers import CiTestCase, mock
+from textwrap import dedent
 
 LOG = logging.getLogger(__name__)
 
@@ -16,6 +17,29 @@ MOUNT_INFO = [
     '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2'
 ]
 
+OS_RELEASE_SLES = dedent("""\
+    NAME="SLES"\n
+    VERSION="12-SP3"\n
+    VERSION_ID="12.3"\n
+    PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n
+    ID="sles"\nANSI_COLOR="0;32"\n
+    CPE_NAME="cpe:/o:suse:sles:12:sp3"\n
+""")
+
+OS_RELEASE_UBUNTU = dedent("""\
+    NAME="Ubuntu"\n
+    VERSION="16.04.3 LTS (Xenial Xerus)"\n
+    ID=ubuntu\n
+    ID_LIKE=debian\n
+    PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+    VERSION_ID="16.04"\n
+    HOME_URL="http://www.ubuntu.com/"\n
+    SUPPORT_URL="http://help.ubuntu.com/"\n
+    BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+    VERSION_CODENAME=xenial\n
+    UBUNTU_CODENAME=xenial\n
+""")
+
 
 class FakeCloud(object):
 
@@ -261,4 +285,56 @@ class TestUdevadmSettle(CiTestCase):
         self.assertRaises(util.ProcessExecutionError, util.udevadm_settle)
 
 
+@mock.patch('os.path.exists')
+class TestGetLinuxDistro(CiTestCase):
+
+    @classmethod
+    def os_release_exists(self, path):
+        """Side effect function"""
+        if path == '/etc/os-release':
+            return 1
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+        """Verify we get the correct name if the os-release file has
+        the distro name in quotes"""
+        m_os_release.return_value = OS_RELEASE_SLES
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('sles', '12.3', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+        """Verify we get the correct name if the os-release file does not
+        have the distro name in quotes"""
+        m_os_release.return_value = OS_RELEASE_UBUNTU
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('ubuntu', '16.04', platform.machine()), dist)
+
+    @mock.patch('platform.dist')
+    def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+        """Verify we get no information if os-release does not exist"""
+        m_platform_dist.return_value = ('', '', '')
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('', '', ''), dist)
+
+    @mock.patch('platform.dist')
+    def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+        """Verify we get an empty tuple when no information exists and
+        Exceptions are not propagated"""
+        m_platform_dist.side_effect = Exception()
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('', '', ''), dist)
+
+    @mock.patch('platform.dist')
+    def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+        """Verify we get the correct platform information"""
+        m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('foo', '1.1', 'aarch64'), dist)
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/util.py b/cloudinit/util.py
index c0473b8..d9b61cf 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -576,6 +576,39 @@ def get_cfg_option_int(yobj, key, default=0):
     return int(get_cfg_option_str(yobj, key, default=default))
 
 
+def get_linux_distro():
+    distro_name = ''
+    distro_version = ''
+    if os.path.exists('/etc/os-release'):
+        os_release = load_file('/etc/os-release')
+        for line in os_release.splitlines():
+            if line.strip().startswith('ID='):
+                distro_name = line.split('=')[-1]
+                distro_name = distro_name.replace('"', '')
+            if line.strip().startswith('VERSION_ID='):
+                # Lets hope for the best that distros stay consistent ;)
+                distro_version = line.split('=')[-1]
+                distro_version = distro_version.replace('"', '')
+    else:
+        dist = ('', '', '')
+        try:
+            # Will be removed in 3.7
+            dist = platform.dist()  # pylint: disable=W1505
+        except Exception:
+            pass
+        finally:
+            found = None
+            for entry in dist:
+                if entry:
+                    found = 1
+            if not found:
+                LOG.warning('Unable to determine distribution, template '
+                            'expansion may have unexpected results')
+        return dist
+
+    return (distro_name, distro_version, platform.machine())
+
+
 def system_info():
     info = {
         'platform': platform.platform(),
@@ -583,19 +616,19 @@ def system_info():
         'release': platform.release(),
         'python': platform.python_version(),
         'uname': platform.uname(),
-        'dist': platform.dist(),  # pylint: disable=W1505
+        'dist': get_linux_distro()
     }
     system = info['system'].lower()
     var = 'unknown'
     if system == "linux":
         linux_dist = info['dist'][0].lower()
-        if linux_dist in ('centos', 'fedora', 'debian'):
+        if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'):
             var = linux_dist
         elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
             var = 'ubuntu'
         elif linux_dist == 'redhat':
             var = 'rhel'
-        elif linux_dist == 'suse':
+        elif linux_dist in ('opensuse', 'sles'):
             var = 'suse'
         else:
             var = 'linux'
@@ -2531,8 +2564,8 @@ def _call_dmidecode(key, dmidecode_path):
         if result.replace(".", "") == "":
             return ""
         return result
-    except (IOError, OSError) as _err:
-        LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err)
+    except (IOError, OSError) as e:
+        LOG.debug('failed dmidecode cmd: %s\n%s', cmd, e)
         return None
 
 
diff --git a/debian/changelog b/debian/changelog
index a78aae3..b529b78 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,9 +1,18 @@
-cloud-init (18.2-59-gcd1de5f4-0ubuntu2) UNRELEASED; urgency=medium
+cloud-init (18.2-64-gbbcc5e82-0ubuntu1) cosmic; urgency=medium
 
   * debian/rules: update version.version_string to contain packaged version.
     (LP: #1770712)
+  * New upstream snapshot.
+    - util: add get_linux_distro function to replace platform.dist
+      [Robert Schweikert] (LP: #1745235)
+    - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+    - - Do not use the systemd_prefix macro, not available in this environment
+      [Robert Schweikert]
+    - doc: Add config info to ec2, openstack and cloudstack datasource docs
+    - Enable SmartOS network metadata to work with netplan via per-subnet
+      routes [Dan McDonald] (LP: #1763512)
 
- -- Scott Moser <smoser@xxxxxxxxxx>  Mon, 04 Jun 2018 10:00:57 -0400
+ -- Chad Smith <chad.smith@xxxxxxxxxxxxx>  Mon, 04 Jun 2018 12:18:16 -0600
 
 cloud-init (18.2-59-gcd1de5f4-0ubuntu1) cosmic; urgency=medium
 
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 38ba75d..30e57d8 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -17,6 +17,103 @@ own way) internally a datasource abstract class was created to allow for a
 single way to access the different cloud systems methods to provide this data
 through the typical usage of subclasses.
 
+
+instance-data
+-------------
+For reference, cloud-init stores all the metadata, vendordata and userdata
+provided by a cloud in a json blob at ``/run/cloud-init/instance-data.json``.
+While the json contains datasource-specific keys and names, cloud-init will
+maintain a minimal set of standardized keys that will remain stable on any
+cloud. Standardized instance-data keys will be present under a "v1" key.
+Any datasource metadata cloud-init consumes will all be present under the
+"ds" key.
+
+Below is an instance-data.json example from an OpenStack instance:
+
+.. sourcecode:: json
+
+  {
+   "base64-encoded-keys": [
+    "ds/meta-data/random_seed",
+    "ds/user-data"
+   ],
+   "ds": {
+    "ec2_metadata": {
+     "ami-id": "ami-0000032f",
+     "ami-launch-index": "0",
+     "ami-manifest-path": "FIXME",
+     "block-device-mapping": {
+      "ami": "vda",
+      "ephemeral0": "/dev/vdb",
+      "root": "/dev/vda"
+     },
+     "hostname": "xenial-test.novalocal",
+     "instance-action": "none",
+     "instance-id": "i-0006e030",
+     "instance-type": "m1.small",
+     "local-hostname": "xenial-test.novalocal",
+     "local-ipv4": "10.5.0.6",
+     "placement": {
+      "availability-zone": "None"
+     },
+     "public-hostname": "xenial-test.novalocal",
+     "public-ipv4": "10.245.162.145",
+     "reservation-id": "r-fxm623oa",
+     "security-groups": "default"
+    },
+    "meta-data": {
+     "availability_zone": null,
+     "devices": [],
+     "hostname": "xenial-test.novalocal",
+     "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0",
+     "launch_index": 0,
+     "local-hostname": "xenial-test.novalocal",
+     "name": "xenial-test",
+     "project_id": "e0eb2d2538814...",
+     "random_seed": "A6yPN...",
+     "uuid": "3e39d278-0644-4728-9479-678f92..."
+    },
+    "network_json": {
+     "links": [
+      {
+       "ethernet_mac_address": "fa:16:3e:7d:74:9b",
+       "id": "tap9ca524d5-6e",
+       "mtu": 8958,
+       "type": "ovs",
+       "vif_id": "9ca524d5-6e5a-4809-936a-6901..."
+      }
+     ],
+     "networks": [
+      {
+       "id": "network0",
+       "link": "tap9ca524d5-6e",
+       "network_id": "c6adfc18-9753-42eb-b3ea-18b57e6b837f",
+       "type": "ipv4_dhcp"
+      }
+     ],
+     "services": [
+      {
+       "address": "10.10.160.2",
+       "type": "dns"
+      }
+     ]
+    },
+    "user-data": "I2Nsb3VkLWNvbmZpZ...",
+    "vendor-data": null
+   },
+   "v1": {
+    "availability-zone": null,
+    "cloud-name": "openstack",
+    "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0",
+    "local-hostname": "xenial-test",
+    "region": null
+   }
+  }
+
+
+
+Datasource API
+--------------
 The current interface that a datasource object must provide is the following:
 
 .. sourcecode:: python
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index 225093a..a3101ed 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -4,7 +4,9 @@ CloudStack
 ==========
 
 `Apache CloudStack`_ expose user-data, meta-data, user password and account
-sshkey thru the Virtual-Router. For more details on meta-data and user-data,
+sshkey thru the Virtual-Router. The datasource obtains the VR address via
+dhcp lease information given to the instance.
+For more details on meta-data and user-data,
 refer the `CloudStack Administrator Guide`_. 
 
 URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1
@@ -18,14 +20,26 @@ is the Virtual Router IP:
 
 Configuration
 -------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
 
-Apache CloudStack datasource can be configured as follows:
+The settings that may be configured are:
 
-.. code:: yaml
+ * **max_wait**:  the maximum amount of clock time in seconds that should be
+   spent searching metadata_urls.  A value less than zero will result in only
+   one request being made, to the first in the list. (default: 120)
+ * **timeout**: the timeout value provided to urlopen for each individual http
+   request.  This is used both when selecting a metadata_url and when crawling
+   the metadata service. (default: 50)
 
-    datasource:
-      CloudStack: {}
-      None: {}
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+  datasource:
+   CloudStack:
+    max_wait: 120
+    timeout: 50
     datasource_list:
       - CloudStack
 
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 3bc66e1..64c325d 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -60,4 +60,34 @@ To see which versions are supported from your cloud provider use the following U
     ...
     latest
 
+
+
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * **metadata_urls**: This list of urls will be searched for an Ec2
+   metadata service. The first entry that successfully returns a 200 response
+   for <url>/<version>/meta-data/instance-id will be selected.
+   (default: ['http://169.254.169.254', 'http://instance-data:8773']).
+ * **max_wait**:  the maximum amount of clock time in seconds that should be
+   spent searching metadata_urls.  A value less than zero will result in only
+   one request being made, to the first in the list. (default: 120)
+ * **timeout**: the timeout value provided to urlopen for each individual http
+   request.  This is used both when selecting a metadata_url and when crawling
+   the metadata service. (default: 50)
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+  datasource:
+   Ec2:
+    metadata_urls: ["http://169.254.169.254:80";, "http://instance-data:8773";]
+    max_wait: 120
+    timeout: 50
+
 .. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index 43592de..0ea8994 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -25,18 +25,22 @@ The settings that may be configured are:
    the metadata service. (default: 10)
  * **retries**: The number of retries that should be done for an http request.
    This value is used only after metadata_url is selected. (default: 5)
+ * **apply_network_config**: A boolean specifying whether to configure the
+   network for the instance based on network_data.json provided by the
+   metadata service. When False, only configure dhcp on the primary nic for
+   this instances. (default: True)
 
-An example configuration with the default values is provided as example below:
+An example configuration with the default values is provided below:
 
 .. sourcecode:: yaml
 
-  #cloud-config
   datasource:
    OpenStack:
     metadata_urls: ["http://169.254.169.254";]
     max_wait: -1
     timeout: 10
     retries: 5
+    apply_network_config: True
 
 
 Vendor Data
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index 366a78c..e781d74 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -5,7 +5,7 @@
 # Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
 
 Name:           cloud-init
-Version:        {{version}}
+Version:        {{rpm_upstream_version}}
 Release:        1{{subrelease}}%{?dist}
 Summary:        Cloud instance init scripts
 
@@ -16,22 +16,13 @@ URL:            http://launchpad.net/cloud-init
 Source0:        {{archive_name}}
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 
-%if 0%{?suse_version} && 0%{?suse_version} <= 1110
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-%else
 BuildArch:      noarch
-%endif
+
 
 {% for r in buildrequires %}
 BuildRequires:        {{r}}
 {% endfor %}
 
-%if 0%{?suse_version} && 0%{?suse_version} <= 1210
-  %define initsys sysvinit
-%else
-  %define initsys systemd
-%endif
-
 # Install pypi 'dynamic' requirements
 {% for r in requires %}
 Requires:       {{r}}
@@ -39,7 +30,7 @@ Requires:       {{r}}
 
 # Custom patches
 {% for p in patches %}
-Patch{{loop.index0}: {{p}}
+Patch{{loop.index0}}: {{p}}
 {% endfor %}
 
 %description
@@ -63,35 +54,21 @@ end for
 %{__python} setup.py install \
             --skip-build --root=%{buildroot} --prefix=%{_prefix} \
             --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \
-            --init-system=%{initsys}
+            --init-system=systemd
+
+# Move udev rules
+mkdir -p %{buildroot}/usr/lib/udev/rules.d/
+mv %{buildroot}/lib/udev/rules.d/* %{buildroot}/usr/lib/udev/rules.d/
 
 # Remove non-SUSE templates
 rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.*
 rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.*
 rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.*
 
-# Remove cloud-init tests
-rm -r %{buildroot}/%{python_sitelib}/tests
-
-# Move sysvinit scripts to the correct place and create symbolic links
-%if %{initsys} == sysvinit
-   mkdir -p %{buildroot}/%{_initddir}
-   mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/
-   rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d
-   rmdir %{buildroot}%{_sysconfdir}/rc.d
-
-   mkdir -p %{buildroot}/%{_sbindir}
-   pushd %{buildroot}/%{_initddir}
-   for file in * ; do
-      ln -s %{_initddir}/${file} %{buildroot}/%{_sbindir}/rc${file}
-   done
-   popd
-%endif
-
 # Move documentation
 mkdir -p %{buildroot}/%{_defaultdocdir}
 mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir}
-for doc in TODO LICENSE ChangeLog requirements.txt; do
+for doc in LICENSE ChangeLog requirements.txt; do
    cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init
 done
 
@@ -114,24 +91,23 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f)
 
 %files
 
-# Sysvinit scripts
-%if %{initsys} == sysvinit
-   %attr(0755, root, root) %{_initddir}/cloud-config
-   %attr(0755, root, root) %{_initddir}/cloud-final
-   %attr(0755, root, root) %{_initddir}/cloud-init-local
-   %attr(0755, root, root) %{_initddir}/cloud-init
-
-   %{_sbindir}/rccloud-*
-%endif
-
 # Program binaries
 %{_bindir}/cloud-init*
 
+# systemd files
+/usr/lib/systemd/system-generators/*
+/usr/lib/systemd/system/*
+
 # There doesn't seem to be an agreed upon place for these
 # although it appears the standard says /usr/lib but rpmbuild
 # will try /usr/lib64 ??
 /usr/lib/%{name}/uncloud-init
 /usr/lib/%{name}/write-ssh-key-fingerprints
+/usr/lib/%{name}/ds-identify
+
+# udev rules
+/usr/lib/udev/rules.d/66-azure-ephemeral.rules
+
 
 # Docs
 %doc %{_defaultdocdir}/cloud-init/*
@@ -145,6 +121,9 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f)
 %config(noreplace) %{_sysconfdir}/cloud/templates/*
 %{_sysconfdir}/bash_completion.d/cloud-init
 
+%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient
+%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager
+
 # Python code is here...
 %{python_sitelib}/*
 
diff --git a/setup.py b/setup.py
index 85b2337..5ed8eae 100755
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@ from distutils.errors import DistutilsArgError
 import subprocess
 
 RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
-
+VARIANT = None
 
 def is_f(p):
     return os.path.isfile(p)
@@ -114,10 +114,20 @@ def render_tmpl(template):
     atexit.register(shutil.rmtree, tmpd)
     bname = os.path.basename(template).rstrip(tmpl_ext)
     fpath = os.path.join(tmpd, bname)
-    tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
+    if VARIANT:
+        tiny_p([sys.executable, './tools/render-cloudcfg', '--variant',
+            VARIANT, template, fpath])
+    else:
+        tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
     # return path relative to setup.py
     return os.path.join(os.path.basename(tmpd), bname)
 
+# User can set the variant for template rendering
+if '--distro' in sys.argv:
+    idx = sys.argv.index('--distro')
+    VARIANT = sys.argv[idx+1]
+    del sys.argv[idx+1]
+    sys.argv.remove('--distro')
 
 INITSYS_FILES = {
     'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
@@ -260,7 +270,7 @@ requirements = read_requires()
 setuptools.setup(
     name='cloud-init',
     version=get_version(),
-    description='EC2 initialisation magic',
+    description='Cloud instance initialisation magic',
     author='Scott Moser',
     author_email='scott.moser@xxxxxxxxxxxxx',
     url='http://launchpad.net/cloud-init/',
@@ -277,4 +287,5 @@ setuptools.setup(
     }
 )
 
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 706e8eb..dca0b3d 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -1027,6 +1027,32 @@ class TestNetworkConversion(TestCase):
         found = convert_net(SDC_NICS_SINGLE_GATEWAY)
         self.assertEqual(expected, found)
 
+    def test_routes_on_all_nics(self):
+        routes = [
+            {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'},
+            {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}]
+        expected = {
+            'version': 1,
+            'config': [
+                {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
+                 'name': 'net0', 'type': 'physical',
+                 'subnets': [{'address': '8.12.42.26/24',
+                              'gateway': '8.12.42.1', 'type': 'static',
+                              'routes': [{'network': '3.0.0.0/8',
+                                          'gateway': '8.12.42.3'},
+                                         {'network': '4.0.0.0/8',
+                                         'gateway': '10.210.1.4'}]}]},
+                {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
+                 'name': 'net1', 'type': 'physical',
+                 'subnets': [{'address': '10.210.1.27/24', 'type': 'static',
+                              'routes': [{'network': '3.0.0.0/8',
+                                          'gateway': '8.12.42.3'},
+                                         {'network': '4.0.0.0/8',
+                                         'gateway': '10.210.1.4'}]}]}]}
+        found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
+        self.maxDiff = None
+        self.assertEqual(expected, found)
+
 
 @unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
                       "Only supported on KVM and bhyve guests under SmartOS")
