← Back to team overview

cloud-init-dev team mailing list archive

[Merge] lp:~harlowja/cloud-init/ds-openstack into lp:cloud-init

 

Joshua Harlow has proposed merging lp:~harlowja/cloud-init/ds-openstack into lp:cloud-init.

Requested reviews:
  cloud init development team (cloud-init-dev)

For more details, see:
https://code.launchpad.net/~harlowja/cloud-init/ds-openstack/+merge/204390

Add a openstack specific datasource

OpenStack has a unique derivative datasource
that is gaining usage. Previously the config
drive datasource provided part of this functionality,
as did the ec2 datasource, but since new
functionality is being added to OpenStack it
seems beneficial to combine the shared parts into
one datasource made specifically for handling
OpenStack deployments.

This patch factors out the common logic shared
between the config drive and the OpenStack
metadata datasource, places it in a shared
helper file, and then creates a new OpenStack
datasource that reads from the OpenStack metadata
service; it also refactors the config drive datasource
to use this common logic.
                              
-- 
https://code.launchpad.net/~harlowja/cloud-init/ds-openstack/+merge/204390
Your team cloud init development team is requested to review the proposed merge of lp:~harlowja/cloud-init/ds-openstack into lp:cloud-init.
=== modified file 'cloudinit/ec2_utils.py'
--- cloudinit/ec2_utils.py	2014-01-23 22:48:32 +0000
+++ cloudinit/ec2_utils.py	2014-02-02 06:50:37 +0000
@@ -21,7 +21,6 @@
 
 import functools
 import json
-import urllib
 
 from cloudinit import log as logging
 from cloudinit import url_helper
@@ -40,16 +39,6 @@
     return False
 
 
-def combine_url(base, add_on):
-    base_parsed = list(urlparse(base))
-    path = base_parsed[2]
-    if path and not path.endswith("/"):
-        path += "/"
-    path += urllib.quote(str(add_on), safe="/:")
-    base_parsed[2] = path
-    return urlunparse(base_parsed)
-
-
 # See: http://bit.ly/TyoUQs
 #
 class MetadataMaterializer(object):
@@ -121,14 +110,14 @@
         (leaves, children) = self._parse(blob)
         child_contents = {}
         for c in children:
-            child_url = combine_url(base_url, c)
+            child_url = url_helper.combine_url(base_url, c)
             if not child_url.endswith("/"):
                 child_url += "/"
             child_blob = str(self._caller(child_url))
             child_contents[c] = self._materialize(child_blob, child_url)
         leaf_contents = {}
         for (field, resource) in leaves.items():
-            leaf_url = combine_url(base_url, resource)
+            leaf_url = url_helper.combine_url(base_url, resource)
             leaf_blob = str(self._caller(leaf_url))
             leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob)
         joined = {}
@@ -153,9 +142,14 @@
 def get_instance_userdata(api_version='latest',
                           metadata_address='http://169.254.169.254',
                           ssl_details=None, timeout=5, retries=5):
-    ud_url = combine_url(metadata_address, api_version)
-    ud_url = combine_url(ud_url, 'user-data')
+    ud_url = url_helper.combine_url(metadata_address, api_version)
+    ud_url = url_helper.combine_url(ud_url, 'user-data')
     user_data = ''
     try:
         # It is ok for userdata to not exist (thats why we are stopping if
         # NOT_FOUND occurs) and just in that case returning an empty string.
@@ -178,8 +172,8 @@
 def get_instance_metadata(api_version='latest',
                           metadata_address='http://169.254.169.254',
                           ssl_details=None, timeout=5, retries=5):
-    md_url = combine_url(metadata_address, api_version)
-    md_url = combine_url(md_url, 'meta-data')
+    md_url = url_helper.combine_url(metadata_address, api_version)
+    md_url = url_helper.combine_url(md_url, 'meta-data')
     caller = functools.partial(util.read_file_or_url,
                                ssl_details=ssl_details, timeout=timeout,
                                retries=retries)

=== modified file 'cloudinit/sources/DataSourceConfigDrive.py'
--- cloudinit/sources/DataSourceConfigDrive.py	2014-01-09 21:14:51 +0000
+++ cloudinit/sources/DataSourceConfigDrive.py	2014-02-02 06:50:37 +0000
@@ -18,181 +18,79 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import base64
-import json
 import os
 
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import util
 
+from cloudinit.sources.helpers import openstack
+
 LOG = logging.getLogger(__name__)
 
 # Various defaults/constants...
 DEFAULT_IID = "iid-dsconfigdrive"
 DEFAULT_MODE = 'pass'
-CFG_DRIVE_FILES_V1 = [
-    "etc/network/interfaces",
-    "root/.ssh/authorized_keys",
-    "meta.js",
-]
 DEFAULT_METADATA = {
     "instance-id": DEFAULT_IID,
 }
 VALID_DSMODES = ("local", "net", "pass", "disabled")
-
-
-class ConfigDriveHelper(object):
-    def __init__(self, distro):
-        self.distro = distro
-
-    def on_first_boot(self, data):
-        if not data:
-            data = {}
-        if 'network_config' in data:
-            LOG.debug("Updating network interfaces from config drive")
-            self.distro.apply_network(data['network_config'])
-        files = data.get('files')
-        if files:
-            LOG.debug("Writing %s injected files", len(files))
-            try:
-                write_files(files)
-            except IOError:
-                util.logexc(LOG, "Failed writing files")
-
-
-class DataSourceConfigDrive(sources.DataSource):
+FS_TYPES = ('vfat', 'iso9660')
+LABEL_TYPES = ('config-2',)
+OPTICAL_DEVICES = tuple(('/dev/sr%s' % i for i in range(0, 2)))
+
+
+class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
-        sources.DataSource.__init__(self, sys_cfg, distro, paths)
+        super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
         self.source = None
         self.dsmode = 'local'
         self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
         self.version = None
         self.ec2_metadata = None
-        self.helper = ConfigDriveHelper(distro)
+        self.files = {}
 
     def __str__(self):
         root = sources.DataSource.__str__(self)
-        mstr = "%s [%s,ver=%s]" % (root,
-                                   self.dsmode,
-                                   self.version)
+        mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
         mstr += "[source=%s]" % (self.source)
         return mstr
 
-    def _ec2_name_to_device(self, name):
-        if not self.ec2_metadata:
-            return None
-        bdm = self.ec2_metadata.get('block-device-mapping', {})
-        for (ent_name, device) in bdm.items():
-            if name == ent_name:
-                return device
-        return None
-
-    def _os_name_to_device(self, name):
-        device = None
-        try:
-            criteria = 'LABEL=%s' % (name)
-            if name in ['swap']:
-                criteria = 'TYPE=%s' % (name)
-            dev_entries = util.find_devs_with(criteria)
-            if dev_entries:
-                device = dev_entries[0]
-        except util.ProcessExecutionError:
-            pass
-        return device
-
-    def _validate_device_name(self, device):
-        if not device:
-            return None
-        if not device.startswith("/"):
-            device = "/dev/%s" % device
-        if os.path.exists(device):
-            return device
-        # Durn, try adjusting the mapping
-        remapped = self._remap_device(os.path.basename(device))
-        if remapped:
-            LOG.debug("Remapped device name %s => %s", device, remapped)
-            return remapped
-        return None
-
-    def device_name_to_device(self, name):
-        # Translate a 'name' to a 'physical' device
-        if not name:
-            return None
-        # Try the ec2 mapping first
-        names = [name]
-        if name == 'root':
-            names.insert(0, 'ami')
-        if name == 'ami':
-            names.append('root')
-        device = None
-        LOG.debug("Using ec2 metadata lookup to find device %s", names)
-        for n in names:
-            device = self._ec2_name_to_device(n)
-            device = self._validate_device_name(device)
-            if device:
-                break
-        # Try the openstack way second
-        if not device:
-            LOG.debug("Using os lookup to find device %s", names)
-            for n in names:
-                device = self._os_name_to_device(n)
-                device = self._validate_device_name(device)
-                if device:
-                    break
-        # Ok give up...
-        if not device:
-            return None
-        else:
-            LOG.debug("Using cfg drive lookup mapped to device %s", device)
-            return device
-
     def get_data(self):
         found = None
         md = {}
-
         results = {}
         if os.path.isdir(self.seed_dir):
             try:
-                results = read_config_drive_dir(self.seed_dir)
+                results = read_config_drive(self.seed_dir)
                 found = self.seed_dir
-            except NonConfigDriveDir:
+            except openstack.NonReadable:
                 util.logexc(LOG, "Failed reading config drive from %s",
                             self.seed_dir)
         if not found:
-            devlist = find_candidate_devs()
-            for dev in devlist:
+            for dev in find_candidate_devs():
                 try:
-                    results = util.mount_cb(dev, read_config_drive_dir)
+                    results = util.mount_cb(dev, read_config_drive)
                     found = dev
+                except openstack.NonReadable:
+                    pass
+                except util.MountFailedError:
+                    pass
+                except openstack.BrokenMetadata:
+                    util.logexc(LOG, "Broken config drive: %s", dev)
+                if found:
                     break
-                except (NonConfigDriveDir, util.MountFailedError):
-                    pass
-                except BrokenConfigDriveDir:
-                    util.logexc(LOG, "broken config drive: %s", dev)
-
         if not found:
             return False
 
-        md = results['metadata']
+        md = results.get('metadata', {})
         md = util.mergemanydict([md, DEFAULT_METADATA])
-
-        # Perform some metadata 'fixups'
-        #
-        # OpenStack uses the 'hostname' key
-        # while most of cloud-init uses the metadata
-        # 'local-hostname' key instead so if it doesn't
-        # exist we need to make sure its copied over.
-        for (tgt, src) in [('local-hostname', 'hostname')]:
-            if tgt not in md and src in md:
-                md[tgt] = md[src]
-
         user_dsmode = results.get('dsmode', None)
         if user_dsmode not in VALID_DSMODES + (None,):
-            LOG.warn("user specified invalid mode: %s" % user_dsmode)
+            LOG.warn("User specified invalid mode: %s" % user_dsmode)
             user_dsmode = None
 
-        dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'],
+        dsmode = get_ds_mode(cfgdrv_ver=results['version'],
                              ds_cfg=self.ds_cfg.get('dsmode'),
                              user=user_dsmode)
 
@@ -209,7 +107,7 @@
         prev_iid = get_previous_iid(self.paths)
         cur_iid = md['instance-id']
         if prev_iid != cur_iid and self.dsmode == "local":
-            self.helper.on_first_boot(results)
+            on_first_boot(results, distro=self.distro)
 
         # dsmode != self.dsmode here if:
         #  * dsmode = "pass",  pass means it should only copy files and then
@@ -225,16 +123,11 @@
         self.metadata = md
         self.ec2_metadata = results.get('ec2-metadata')
         self.userdata_raw = results.get('userdata')
-        self.version = results['cfgdrive_ver']
-
+        self.version = results['version']
+        self.files.update(results.get('files', {}))
+        self.vendordata_raw = results.get('vendordata')
         return True
 
-    def get_public_ssh_keys(self):
-        name = "public_keys"
-        if self.version == 1:
-            name = "public-keys"
-        return sources.normalize_pubkey_data(self.metadata.get(name))
-
 
 class DataSourceConfigDriveNet(DataSourceConfigDrive):
     def __init__(self, sys_cfg, distro, paths):
@@ -242,6 +135,7 @@
         self.dsmode = 'net'
 
 
+<<<<<<< TREE
 class NonConfigDriveDir(Exception):
     pass
 
@@ -458,6 +352,8 @@
     return results
 
 
+=======
+>>>>>>> MERGE-SOURCE
 def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
     """Determine what mode should be used.
     valid values are 'pass', 'disabled', 'local', 'net'
@@ -483,6 +379,21 @@
     return "net"
 
 
+def read_config_drive(source_dir, version="2012-08-10"):
+    reader = openstack.ConfigDriveReader(source_dir)
+    finders = [
+        (reader.read_v2, [], {'version': version}),
+        (reader.read_v1, [], {}),
+    ]
+    excps = []
+    for (functor, args, kwargs) in finders:
+        try:
+            return functor(*args, **kwargs)
+        except openstack.NonReadable as e:
+            excps.append(e)
+    raise excps[-1]
+
+
 def get_previous_iid(paths):
     # interestingly, for this purpose the "previous" instance-id is the current
     # instance-id.  cloud-init hasn't moved them over yet as this datasource
@@ -494,17 +405,76 @@
         return None
 
 
-def write_files(files):
-    for (name, content) in files.iteritems():
-        if name[0] != os.sep:
-            name = os.sep + name
-        util.write_file(name, content, mode=0660)
+def on_first_boot(data, distro=None):
+    """Performs any first-boot actions using data read from a config-drive."""
+    if not isinstance(data, dict):
+        raise TypeError("Config-drive data expected to be a dict; not %s"
+                        % (type(data)))
+    net_conf = data.get("network_config", '')
+    if net_conf and distro:
+        LOG.debug("Updating network interfaces from config drive")
+        distro.apply_network(net_conf)
+    files = data.get('files', {})
+    if files:
+        LOG.debug("Writing %s injected files", len(files))
+        for (filename, content) in files.iteritems():
+            if not filename.startswith(os.sep):
+                filename = os.sep + filename
+            try:
+                util.write_file(filename, content, mode=0660)
+            except IOError:
+                util.logexc(LOG, "Failed writing file: %s", filename)
+
+
+def find_candidate_devs(probe_optical=True):
+    """Return a list of devices that may contain the config drive.
+
+    The returned list is sorted by search order where the first item
+    should be searched first (highest priority).
+
+    config drive v1:
+       Per documentation, this is "associated as the last available disk on the
+       instance", and should be VFAT.
+       Currently, we do not restrict search list to "last available disk"
+
+    config drive v2:
+       Disk should be:
+        * either vfat or iso9660 formatted
+        * labeled with 'config-2'
+    """
+    # query optical drive to get it in blkid cache for 2.6 kernels
+    if probe_optical:
+        for device in OPTICAL_DEVICES:
+            try:
+                util.find_devs_with(path=device)
+            except util.ProcessExecutionError:
+                pass
+
+    by_fstype = []
+    for fs_type in FS_TYPES:
+        by_fstype.extend(util.find_devs_with("TYPE=%s" % (fs_type)))
+
+    by_label = []
+    for label in LABEL_TYPES:
+        by_label.extend(util.find_devs_with("LABEL=%s" % (label)))
+
+    # give preference to "last available disk" (vdb over vda)
+    # note, this is not a perfect rendition of that.
+    by_fstype.sort(reverse=True)
+    by_label.sort(reverse=True)
+
+    # combine list of items by putting by-label items first
+    # followed by fstype items, but with dupes removed
+    combined = (by_label + [d for d in by_fstype if d not in by_label])
+
+    # we are looking for block device (sda, not sda1), ignore partitions
+    return [d for d in combined if not util.is_partition(d)]
 
 
 # Used to match classes to dependencies
 datasources = [
-  (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
-  (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+    (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
+    (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
 ]
 
 

=== modified file 'cloudinit/url_helper.py'
--- cloudinit/url_helper.py	2014-01-25 03:31:28 +0000
+++ cloudinit/url_helper.py	2014-02-02 06:50:37 +0000
@@ -22,6 +22,7 @@
 
 import httplib
 import time
+import urllib
 
 import requests
 from requests import exceptions
@@ -61,6 +62,7 @@
     return urlunparse(parsed_url)
 
 
 # Made to have same accessors as UrlResponse so that the
 # read_file_or_url can return this or that object and the
 # 'user' of those objects will not need to know the difference.
@@ -86,6 +88,25 @@
         self.url = path
 
 
+def combine_url(base, *add_ons):
+
+    def combine_single(url, add_on):
+        url_parsed = list(urlparse(url))
+        path = url_parsed[2]
+        if path and not path.endswith("/"):
+            path += "/"
+        path += urllib.quote(str(add_on), safe="/:")
+        url_parsed[2] = path
+        return urlunparse(url_parsed)
+
+    url = base
+    for add_on in add_ons:
+        url = combine_single(url, add_on)
+    return url
+
+
 class UrlResponse(object):
     def __init__(self, response):
         self._response = response
@@ -129,30 +150,56 @@
             self.headers = {}
 
 
+def _get_ssl_args(url, ssl_details):
+    ssl_args = {}
+    scheme = urlparse(url).scheme  # pylint: disable=E1101
+    if scheme == 'https' and ssl_details:
+        if not SSL_ENABLED:
+            LOG.warn("SSL is not enabled, cert. verification can not occur!")
+        else:
+            if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
+                ssl_args['verify'] = ssl_details['ca_certs']
+            else:
+                ssl_args['verify'] = True
+            if 'cert_file' in ssl_details and 'key_file' in ssl_details:
+                ssl_args['cert'] = [ssl_details['cert_file'],
+                                    ssl_details['key_file']]
+            elif 'cert_file' in ssl_details:
+                ssl_args['cert'] = str(ssl_details['cert_file'])
+    return ssl_args
+
+
+def existsurl(url, ssl_details=None, timeout=None):
+    r = _readurl(url, ssl_details=ssl_details, timeout=timeout,
+                 method='HEAD', check_status=False)
+    return r.ok()
+
+
 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
             check_status=True, allow_redirects=True, exception_cb=None):
+    return _readurl(url, data=data, timeout=timeout, retries=retries,
+                    sec_between=sec_between, headers=headers,
+                    headers_cb=headers_cb, ssl_details=ssl_details,
+                    check_status=check_status,
+                    allow_redirects=allow_redirects,
+                    exception_cb=exception_cb)
+
+
+def _readurl(url, data=None, timeout=None, retries=0, sec_between=1,
+             headers=None, headers_cb=None, ssl_details=None,
+             check_status=True, allow_redirects=True, method='GET',
+             exception_cb=None):
     url = _cleanurl(url)
     req_args = {
         'url': url,
     }
+    req_args.update(_get_ssl_args(url, ssl_details))
     scheme = urlparse(url).scheme  # pylint: disable=E1101
-    if scheme == 'https' and ssl_details:
-        if not SSL_ENABLED:
-            LOG.warn("SSL is not enabled, cert. verification can not occur!")
-        else:
-            if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
-                req_args['verify'] = ssl_details['ca_certs']
-            else:
-                req_args['verify'] = True
-            if 'cert_file' in ssl_details and 'key_file' in ssl_details:
-                req_args['cert'] = [ssl_details['cert_file'],
-                                    ssl_details['key_file']]
-            elif 'cert_file' in ssl_details:
-                req_args['cert'] = str(ssl_details['cert_file'])
-
     req_args['allow_redirects'] = allow_redirects
-    req_args['method'] = 'GET'
+    req_args['method'] = method
     if timeout is not None:
         req_args['timeout'] = max(float(timeout), 0)
     if data:

=== modified file 'cloudinit/util.py'
--- cloudinit/util.py	2014-01-29 19:31:47 +0000
+++ cloudinit/util.py	2014-02-02 06:50:37 +0000
@@ -32,6 +32,7 @@
 import grp
 import gzip
 import hashlib
+import json
 import os
 import os.path
 import platform
@@ -362,6 +363,15 @@
             log.log(log_level, text)
 
 
+def load_json(text, root_types=(dict,)):
+    decoded = json.loads(text)
+    if not isinstance(decoded, tuple(root_types)):
+        expected_types = ", ".join([str(t) for t in root_types])
+        raise TypeError("(%s) root types expected, got %s instead"
+                        % (expected_types, type(decoded)))
+    return decoded
+
+
 def is_ipv4(instr):
     """determine if input string is a ipv4 address. return boolean."""
     toks = instr.split('.')

=== modified file 'tests/unittests/test_datasource/test_configdrive.py'
--- tests/unittests/test_datasource/test_configdrive.py	2014-01-24 20:29:09 +0000
+++ tests/unittests/test_datasource/test_configdrive.py	2014-02-02 06:50:37 +0000
@@ -9,6 +9,7 @@
 from cloudinit import helpers
 from cloudinit import settings
 from cloudinit.sources import DataSourceConfigDrive as ds
+from cloudinit.sources.helpers import openstack
 from cloudinit import util
 
 from tests.unittests import helpers as unit_helpers
@@ -71,7 +72,7 @@
 
     def test_ec2_metadata(self):
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
-        found = ds.read_config_drive_dir(self.tmp)
+        found = ds.read_config_drive(self.tmp)
         self.assertTrue('ec2-metadata' in found)
         ec2_md = found['ec2-metadata']
         self.assertEqual(EC2_META, ec2_md)
@@ -81,7 +82,7 @@
         cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
                                           None,
                                           helpers.Paths({}))
-        found = ds.read_config_drive_dir(self.tmp)
+        found = ds.read_config_drive(self.tmp)
         cfg_ds.metadata = found['metadata']
         name_tests = {
             'ami': '/dev/vda1',
@@ -112,7 +113,7 @@
         cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
                                           None,
                                           helpers.Paths({}))
-        found = ds.read_config_drive_dir(self.tmp)
+        found = ds.read_config_drive(self.tmp)
         os_md = found['metadata']
         cfg_ds.metadata = os_md
         name_tests = {
@@ -140,7 +141,7 @@
         cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
                                           None,
                                           helpers.Paths({}))
-        found = ds.read_config_drive_dir(self.tmp)
+        found = ds.read_config_drive(self.tmp)
         ec2_md = found['ec2-metadata']
         os_md = found['metadata']
         cfg_ds.ec2_metadata = ec2_md
@@ -165,13 +166,13 @@
                 my_mock.replay()
                 device = cfg_ds.device_name_to_device(name)
                 self.assertEquals(dev_name, device)
 
     def test_dev_ec2_map(self):
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
         cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
                                           None,
                                           helpers.Paths({}))
-        found = ds.read_config_drive_dir(self.tmp)
+        found = ds.read_config_drive(self.tmp)
         exists_mock = self.mocker.replace(os.path.exists,
                                           spec=False, passthrough=False)
         exists_mock(mocker.ARGS)
@@ -200,10 +201,11 @@
 
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
 
-        found = ds.read_config_drive_dir(self.tmp)
+        found = ds.read_config_drive(self.tmp)
 
         expected_md = copy(OSTACK_META)
         expected_md['instance-id'] = expected_md['uuid']
+        expected_md['local-hostname'] = expected_md['hostname']
 
         self.assertEqual(USER_DATA, found['userdata'])
         self.assertEqual(expected_md, found['metadata'])
@@ -219,10 +221,11 @@
 
         populate_dir(self.tmp, data)
 
-        found = ds.read_config_drive_dir(self.tmp)
+        found = ds.read_config_drive(self.tmp)
 
         expected_md = copy(OSTACK_META)
         expected_md['instance-id'] = expected_md['uuid']
+        expected_md['local-hostname'] = expected_md['hostname']
 
         self.assertEqual(expected_md, found['metadata'])
 
@@ -235,8 +238,8 @@
 
         populate_dir(self.tmp, data)
 
-        self.assertRaises(ds.BrokenConfigDriveDir,
-                          ds.read_config_drive_dir, self.tmp)
+        self.assertRaises(openstack.BrokenMetadata,
+                          ds.read_config_drive, self.tmp)
 
     def test_seed_dir_no_configdrive(self):
         """Verify that no metadata raises NonConfigDriveDir."""
@@ -247,14 +250,14 @@
         data["openstack/latest/random-file.txt"] = "random-content"
         data["content/foo"] = "foocontent"
 
-        self.assertRaises(ds.NonConfigDriveDir,
-                          ds.read_config_drive_dir, my_d)
+        self.assertRaises(openstack.NonReadable,
+                          ds.read_config_drive, my_d)
 
     def test_seed_dir_missing(self):
         """Verify that missing seed_dir raises NonConfigDriveDir."""
         my_d = os.path.join(self.tmp, "nonexistantdirectory")
-        self.assertRaises(ds.NonConfigDriveDir,
-                          ds.read_config_drive_dir, my_d)
+        self.assertRaises(openstack.NonReadable,
+                          ds.read_config_drive, my_d)
 
     def test_find_candidates(self):
         devs_with_answers = {}
@@ -304,7 +307,7 @@
 
 
 def cfg_ds_from_dir(seed_d):
-    found = ds.read_config_drive_dir(seed_d)
+    found = ds.read_config_drive(seed_d)
     cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
                                       helpers.Paths({}))
     populate_ds_from_read_config(cfg_ds, seed_d, found)
@@ -319,7 +322,7 @@
     cfg_ds.metadata = results.get('metadata')
     cfg_ds.ec2_metadata = results.get('ec2-metadata')
     cfg_ds.userdata_raw = results.get('userdata')
-    cfg_ds.version = results.get('cfgdrive_ver')
+    cfg_ds.version = results.get('version')
 
 
 def populate_dir(seed_dir, files):

=== modified file 'tests/unittests/test_ec2_util.py'
--- tests/unittests/test_ec2_util.py	2014-01-23 22:41:09 +0000
+++ tests/unittests/test_ec2_util.py	2014-02-02 06:50:37 +0000
@@ -1,6 +1,7 @@
 from tests.unittests import helpers
 
 from cloudinit import ec2_utils as eu
+from cloudinit import url_helper as uh
 
 import httpretty as hp
 
@@ -48,11 +49,11 @@
                         body="\n".join(['hostname',
                                         'instance-id',
                                         'ami-launch-index']))
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
                         status=200, body='ec2.fake.host.name.com')
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
                         status=200, body='123')
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'ami-launch-index'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'ami-launch-index'),
                         status=200, body='1')
         md = eu.get_instance_metadata(self.VERSION, retries=0)
         self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@@ -66,14 +67,14 @@
                         body="\n".join(['hostname',
                                         'instance-id',
                                         'public-keys/']))
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
                         status=200, body='ec2.fake.host.name.com')
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
                         status=200, body='123')
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
                         status=200, body='0=my-public-key')
         hp.register_uri(hp.GET,
-                        eu.combine_url(base_url, 'public-keys/0/openssh-key'),
+                        uh.combine_url(base_url, 'public-keys/0/openssh-key'),
                         status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
         md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
         self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@@ -87,18 +88,18 @@
                         body="\n".join(['hostname',
                                         'instance-id',
                                         'public-keys/']))
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
                         status=200, body='ec2.fake.host.name.com')
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
                         status=200, body='123')
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
                         status=200,
                         body="\n".join(['0=my-public-key', '1=my-other-key']))
         hp.register_uri(hp.GET,
-                        eu.combine_url(base_url, 'public-keys/0/openssh-key'),
+                        uh.combine_url(base_url, 'public-keys/0/openssh-key'),
                         status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
         hp.register_uri(hp.GET,
-                        eu.combine_url(base_url, 'public-keys/1/openssh-key'),
+                        uh.combine_url(base_url, 'public-keys/1/openssh-key'),
                         status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
         md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
         self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@@ -112,20 +113,20 @@
                         body="\n".join(['hostname',
                                         'instance-id',
                                         'block-device-mapping/']))
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
                         status=200, body='ec2.fake.host.name.com')
-        hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
+        hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
                         status=200, body='123')
         hp.register_uri(hp.GET,
-                        eu.combine_url(base_url, 'block-device-mapping/'),
+                        uh.combine_url(base_url, 'block-device-mapping/'),
                         status=200,
                         body="\n".join(['ami', 'ephemeral0']))
         hp.register_uri(hp.GET,
-                        eu.combine_url(base_url, 'block-device-mapping/ami'),
+                        uh.combine_url(base_url, 'block-device-mapping/ami'),
                         status=200,
                         body="sdb")
         hp.register_uri(hp.GET,
-                        eu.combine_url(base_url,
+                        uh.combine_url(base_url,
                                        'block-device-mapping/ephemeral0'),
                         status=200,
                         body="sdc")


Follow ups