← Back to team overview

cloud-init-dev team mailing list archive

[Merge] ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

 

Chad Smith has proposed merging ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.

Commit message:
sync upstream snapshot for release into cosmic

Requested reviews:
  cloud-init committers (cloud-init-dev)
Related bugs:
  Bug #1795741 in cloud-init: "Release 18.4"
  https://bugs.launchpad.net/cloud-init/+bug/1795741

For more details, see:
https://code.launchpad.net/~chad.smith/cloud-init/+git/cloud-init/+merge/356030
-- 
Your team cloud-init committers is requested to review the proposed merge of ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.
diff --git a/ChangeLog b/ChangeLog
index 72c5287..9c043b0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,86 @@
+18.4:
+ - add rtd example docs about new standardized keys
+ - use ds._crawled_metadata instance attribute if set when writing
+   instance-data.json
+ - ec2: update crawled metadata. add standardized keys
+ - tests: allow skipping an entire cloud_test without running.
+ - tests: disable lxd tests on cosmic
+ - cii-tests: use unittest2.SkipTest in ntp_chrony due to new deps
+ - lxd: adjust to snap installed lxd.
+ - docs: surface experimental doc in instance-data.json
+ - tests: fix ec2 integration tests. process meta_data instead of meta-data
+ - Add support for Infiniband network interfaces (IPoIB). [Mark Goddard]
+ - cli: add cloud-init query subcommand to query instance metadata
+ - tools/tox-venv: update for new features.
+ - pylint: ignore warning assignment-from-no-return for _write_network
+ - stages: Fix bug causing datasource to have incorrect sys_cfg.
+   (LP: #1787459)
+ - Remove dead-code _write_network distro implementations.
+ - net_util: ensure static configs have netmask in translate_network result
+   [Thomas Berger] (LP: #1792454)
+ - Fall back to root:root on syslog permissions if other options fail.
+   [Robert Schweikert]
+ - tests: Add mock for util.get_hostname. [Robert Schweikert] (LP: #1792799)
+ - ds-identify: doc string cleanup.
+ - OpenStack: Support setting mac address on bond.
+   [Fabian Wiesel] (LP: #1682064)
+ - bash_completion/cloud-init: fix shell syntax error.
+ - EphemeralIPv4Network: Be more explicit when adding default route.
+   (LP: #1792415)
+ - OpenStack: support reading of newer versions of metdata.
+ - OpenStack: fix bug causing 'latest' version to be used from network.
+   (LP: #1792157)
+ - user-data: jinja template to render instance-data.json in cloud-config
+   (LP: #1791781)
+ - config: disable ssh access to a configured user account
+ - tests: print failed testname instead of docstring upon failure
+ - tests: Disallow use of util.subp except for where needed.
+ - sysconfig: refactor sysconfig to accept distro specific templates paths
+ - Add unit tests for config/cc_ssh.py [Francis Ginther]
+ - Fix the built-in cloudinit/tests/helpers:skipIf
+ - read-version: enhance error message [Joshua Powers]
+ - hyperv_reporting_handler: simplify threaded publisher
+ - VMWare: Fix a network config bug in vm with static IPv4 and no gateway.
+   [Pengpeng Sun] (LP: #1766538)
+ - logging: Add logging config type hyperv for reporting via Azure KVP
+   [Andy Liu]
+ - tests: disable other snap test as well [Joshua Powers]
+ - tests: disable snap, fix write_files binary [Joshua Powers]
+ - Add datasource Oracle Compute Infrastructure (OCI).
+ - azure: allow azure to generate network configuration from IMDS per boot.
+ - Scaleway: Add network configuration to the DataSource [Louis Bouchard]
+ - docs: Fix example cloud-init analyze command to match output.
+   [Wesley Gao]
+ - netplan: Correctly render macaddress on a bonds and bridges when
+   provided. (LP: #1784699)
+ - tools: Add 'net-convert' subcommand command to 'cloud-init devel'.
+ - redhat: remove ssh keys on new instance. (LP: #1781094)
+ - Use typeset or local in profile.d scripts. (LP: #1784713)
+ - OpenNebula: Fix null gateway6 [Akihiko Ota] (LP: #1768547)
+ - oracle: fix detect_openstack to report True on OracleCloud.com DMI data
+   (LP: #1784685)
+ - tests: improve LXDInstance trying to workaround or catch bug.
+ - update_metadata re-config on every boot comments and tests not quite
+   right [Mike Gerdts]
+ - tests: Collect build_info from system if available.
+ - pylint: Fix pylint warnings reported in pylint 2.0.0.
+ - get_linux_distro: add support for rhel via redhat-release.
+ - get_linux_distro: add support for centos6 and rawhide flavors of redhat
+   (LP: #1781229)
+ - tools: add '--debug' to tools/net-convert.py
+ - tests: bump the version of paramiko to 2.4.1.
+ - docs: note in rtd about avoiding /tmp when writing files (LP: #1727876)
+ - ubuntu,centos,debian: get_linux_distro to align with platform.dist
+   (LP: #1780481)
+ - Fix boothook docs on environment variable name (INSTANCE_I ->
+   INSTANCE_ID) [Marc Tamsky]
+ - update_metadata: a datasource can support network re-config every boot
+ - tests: drop salt-minion integration test (LP: #1778737)
+ - Retry on failed import of gpg receive keys.
+ - tools: Fix run-container when neither source or binary package requested.
+ - docs: Fix a small spelling error. [Oz N Tiram]
+ - tox: use simplestreams from git repository rather than bzr.
+
 18.3:
  - docs: represent sudo:false in docs for user_groups config module
  - Explicitly prevent `sudo` access for user module
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
index 6d01bf3..8c25032 100644
--- a/bash_completion/cloud-init
+++ b/bash_completion/cloud-init
@@ -10,7 +10,7 @@ _cloudinit_complete()
     cur_word="${COMP_WORDS[COMP_CWORD]}"
     prev_word="${COMP_WORDS[COMP_CWORD-1]}"
 
-    subcmds="analyze clean collect-logs devel dhclient-hook features init modules single status"
+    subcmds="analyze clean collect-logs devel dhclient-hook features init modules query single status"
     base_params="--help --file --version --debug --force"
     case ${COMP_CWORD} in
         1)
@@ -40,6 +40,8 @@ _cloudinit_complete()
                     COMPREPLY=($(compgen -W "--help --mode" -- $cur_word))
                     ;;
 
+                query)
+                    COMPREPLY=($(compgen -W "--all --help --instance-data --list-keys --user-data --vendor-data --debug" -- $cur_word));;
                 single)
                     COMPREPLY=($(compgen -W "--help --name --frequency --report" -- $cur_word))
                     ;;
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index e85933d..2ba6b68 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -9,7 +9,6 @@ import sys
 from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
 from cloudinit import log
 from cloudinit.sources import INSTANCE_JSON_FILE
-from cloudinit import util
 from . import addLogHandlerCLI, read_cfg_paths
 
 NAME = 'render'
@@ -54,11 +53,7 @@ def handle_args(name, args):
             paths.run_dir, INSTANCE_JSON_FILE)
     else:
         instance_data_fn = args.instance_data
-    try:
-        with open(instance_data_fn) as stream:
-            instance_data = stream.read()
-        instance_data = util.load_json(instance_data)
-    except IOError:
+    if not os.path.exists(instance_data_fn):
         LOG.error('Missing instance-data.json file: %s', instance_data_fn)
         return 1
     try:
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 0eee583..5a43702 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -791,6 +791,10 @@ def main(sysv_args=None):
                                      ' pass to this module'))
     parser_single.set_defaults(action=('single', main_single))
 
+    parser_query = subparsers.add_parser(
+        'query',
+        help='Query standardized instance metadata from the command line.')
+
     parser_dhclient = subparsers.add_parser('dhclient-hook',
                                             help=('run the dhclient hook'
                                                   'to record network info'))
@@ -842,6 +846,12 @@ def main(sysv_args=None):
             clean_parser(parser_clean)
             parser_clean.set_defaults(
                 action=('clean', handle_clean_args))
+        elif sysv_args[0] == 'query':
+            from cloudinit.cmd.query import (
+                get_parser as query_parser, handle_args as handle_query_args)
+            query_parser(parser_query)
+            parser_query.set_defaults(
+                action=('render', handle_query_args))
         elif sysv_args[0] == 'status':
             from cloudinit.cmd.status import (
                 get_parser as status_parser, handle_status_args)
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
new file mode 100644
index 0000000..7d2d4fe
--- /dev/null
+++ b/cloudinit/cmd/query.py
@@ -0,0 +1,155 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Query standardized instance metadata from the command line."""
+
+import argparse
+import os
+import six
+import sys
+
+from cloudinit.handlers.jinja_template import (
+    convert_jinja_instance_data, render_jinja_payload)
+from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
+from cloudinit import log
+from cloudinit.sources import (
+    INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
+from cloudinit import util
+
+NAME = 'query'
+LOG = log.getLogger(NAME)
+
+
+def get_parser(parser=None):
+    """Build or extend an arg parser for query utility.
+
+    @param parser: Optional existing ArgumentParser instance representing the
+        query subcommand which will be extended to support the args of
+        this utility.
+
+    @returns: ArgumentParser with proper argument configuration.
+    """
+    if not parser:
+        parser = argparse.ArgumentParser(
+            prog=NAME, description='Query cloud-init instance data')
+    parser.add_argument(
+        '-d', '--debug', action='store_true', default=False,
+        help='Add verbose messages during template render')
+    parser.add_argument(
+        '-i', '--instance-data', type=str,
+        help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
+              % INSTANCE_JSON_FILE))
+    parser.add_argument(
+        '-l', '--list-keys', action='store_true', default=False,
+        help=('List query keys available at the provided instance-data'
+              ' <varname>.'))
+    parser.add_argument(
+        '-u', '--user-data', type=str,
+        help=('Path to user-data file. Default is'
+              ' /var/lib/cloud/instance/user-data.txt'))
+    parser.add_argument(
+        '-v', '--vendor-data', type=str,
+        help=('Path to vendor-data file. Default is'
+              ' /var/lib/cloud/instance/vendor-data.txt'))
+    parser.add_argument(
+        'varname', type=str, nargs='?',
+        help=('A dot-delimited instance data variable to query from'
+              ' instance-data query. For example: v2.local_hostname'))
+    parser.add_argument(
+        '-a', '--all', action='store_true', default=False, dest='dump_all',
+        help='Dump all available instance-data')
+    parser.add_argument(
+        '-f', '--format', type=str, dest='format',
+        help=('Optionally specify a custom output format string. Any'
+              ' instance-data variable can be specified between double-curly'
+              ' braces. For example -f "{{ v2.cloud_name }}"'))
+    return parser
+
+
+def handle_args(name, args):
+    """Handle calls to 'cloud-init query' as a subcommand."""
+    paths = None
+    addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+    if not any([args.list_keys, args.varname, args.format, args.dump_all]):
+        LOG.error(
+            'Expected one of the options: --all, --format,'
+            ' --list-keys or varname')
+        get_parser().print_help()
+        return 1
+
+    uid = os.getuid()
+    if not all([args.instance_data, args.user_data, args.vendor_data]):
+        paths = read_cfg_paths()
+    if not args.instance_data:
+        if uid == 0:
+            default_json_fn = INSTANCE_JSON_SENSITIVE_FILE
+        else:
+            default_json_fn = INSTANCE_JSON_FILE  # World readable
+        instance_data_fn = os.path.join(paths.run_dir, default_json_fn)
+    else:
+        instance_data_fn = args.instance_data
+    if not args.user_data:
+        user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
+    else:
+        user_data_fn = args.user_data
+    if not args.vendor_data:
+        vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
+    else:
+        vendor_data_fn = args.vendor_data
+
+    try:
+        instance_json = util.load_file(instance_data_fn)
+    except IOError:
+        LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+        return 1
+
+    instance_data = util.load_json(instance_json)
+    if uid != 0:
+        instance_data['userdata'] = (
+            '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn))
+        instance_data['vendordata'] = (
+            '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
+    else:
+        instance_data['userdata'] = util.load_file(user_data_fn)
+        instance_data['vendordata'] = util.load_file(vendor_data_fn)
+    if args.format:
+        payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
+        rendered_payload = render_jinja_payload(
+            payload=payload, payload_fn='query commandline',
+            instance_data=instance_data,
+            debug=True if args.debug else False)
+        if rendered_payload:
+            print(rendered_payload)
+            return 0
+        return 1
+
+    response = convert_jinja_instance_data(instance_data)
+    if args.varname:
+        try:
+            for var in args.varname.split('.'):
+                response = response[var]
+        except KeyError:
+            LOG.error('Undefined instance-data key %s', args.varname)
+            return 1
+        if args.list_keys:
+            if not isinstance(response, dict):
+                LOG.error("--list-keys provided but '%s' is not a dict", var)
+                return 1
+            response = '\n'.join(sorted(response.keys()))
+    elif args.list_keys:
+        response = '\n'.join(sorted(response.keys()))
+    if not isinstance(response, six.string_types):
+        response = util.json_dumps(response)
+    print(response)
+    return 0
+
+
+def main():
+    """Tool to query specific instance-data values."""
+    parser = get_parser()
+    sys.exit(handle_args(NAME, parser.parse_args()))
+
+
+if __name__ == '__main__':
+    main()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index e2c54ae..a1e534f 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -125,7 +125,9 @@ class TestMain(FilesystemMockingTestCase):
             updated_cfg.update(
                 {'def_log_file': '/var/log/cloud-init.log',
                  'log_cfgs': [],
-                 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
+                 'syslog_fix_perms': [
+                     'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
+                 ],
                  'vendor_data': {'enabled': True, 'prefix': []}})
             updated_cfg.pop('system_info')
 
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
new file mode 100644
index 0000000..fb87c6a
--- /dev/null
+++ b/cloudinit/cmd/tests/test_query.py
@@ -0,0 +1,193 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from six import StringIO
+from textwrap import dedent
+import os
+
+from collections import namedtuple
+from cloudinit.cmd import query
+from cloudinit.helpers import Paths
+from cloudinit.sources import REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE
+from cloudinit.tests.helpers import CiTestCase, mock
+from cloudinit.util import ensure_dir, write_file
+
+
+class TestQuery(CiTestCase):
+
+    with_logs = True
+
+    args = namedtuple(
+        'queryargs',
+        ('debug dump_all format instance_data list_keys user_data vendor_data'
+         ' varname'))
+
+    def setUp(self):
+        super(TestQuery, self).setUp()
+        self.tmp = self.tmp_dir()
+        self.instance_data = self.tmp_path('instance-data', dir=self.tmp)
+
+    def test_handle_args_error_on_missing_param(self):
+        """Error when missing required parameters and print usage."""
+        args = self.args(
+            debug=False, dump_all=False, format=None, instance_data=None,
+            list_keys=False, user_data=None, vendor_data=None, varname=None)
+        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+            with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+                self.assertEqual(1, query.handle_args('anyname', args))
+        expected_error = (
+            'ERROR: Expected one of the options: --all, --format, --list-keys'
+            ' or varname\n')
+        self.assertIn(expected_error, self.logs.getvalue())
+        self.assertIn('usage: query', m_stdout.getvalue())
+        self.assertIn(expected_error, m_stderr.getvalue())
+
+    def test_handle_args_error_on_missing_instance_data(self):
+        """When instance_data file path does not exist, log an error."""
+        absent_fn = self.tmp_path('absent', dir=self.tmp)
+        args = self.args(
+            debug=False, dump_all=True, format=None, instance_data=absent_fn,
+            list_keys=False, user_data='ud', vendor_data='vd', varname=None)
+        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+            self.assertEqual(1, query.handle_args('anyname', args))
+        self.assertIn(
+            'ERROR: Missing instance-data.json file: %s' % absent_fn,
+            self.logs.getvalue())
+        self.assertIn(
+            'ERROR: Missing instance-data.json file: %s' % absent_fn,
+            m_stderr.getvalue())
+
+    def test_handle_args_defaults_instance_data(self):
+        """When no instance_data argument, default to configured run_dir."""
+        args = self.args(
+            debug=False, dump_all=True, format=None, instance_data=None,
+            list_keys=False, user_data=None, vendor_data=None, varname=None)
+        run_dir = self.tmp_path('run_dir', dir=self.tmp)
+        ensure_dir(run_dir)
+        paths = Paths({'run_dir': run_dir})
+        self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
+        self.m_paths.return_value = paths
+        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+            self.assertEqual(1, query.handle_args('anyname', args))
+        json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+        self.assertIn(
+            'ERROR: Missing instance-data.json file: %s' % json_file,
+            self.logs.getvalue())
+        self.assertIn(
+            'ERROR: Missing instance-data.json file: %s' % json_file,
+            m_stderr.getvalue())
+
+    def test_handle_args_dumps_all_instance_data(self):
+        """When --all is specified query will dump all instance data vars."""
+        write_file(self.instance_data, '{"my-var": "it worked"}')
+        args = self.args(
+            debug=False, dump_all=True, format=None,
+            instance_data=self.instance_data, list_keys=False,
+            user_data='ud', vendor_data='vd', varname=None)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            self.assertEqual(0, query.handle_args('anyname', args))
+        self.assertEqual(
+            '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
+            ' "vendordata": "<%s> file:vd"\n}\n' % (
+                REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE),
+            m_stdout.getvalue())
+
+    def test_handle_args_returns_top_level_varname(self):
+        """When the argument varname is passed, report its value."""
+        write_file(self.instance_data, '{"my-var": "it worked"}')
+        args = self.args(
+            debug=False, dump_all=True, format=None,
+            instance_data=self.instance_data, list_keys=False,
+            user_data='ud', vendor_data='vd', varname='my_var')
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            self.assertEqual(0, query.handle_args('anyname', args))
+        self.assertEqual('it worked\n', m_stdout.getvalue())
+
+    def test_handle_args_returns_nested_varname(self):
+        """If user_data file is a jinja template render instance-data vars."""
+        write_file(self.instance_data,
+                   '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}')
+        args = self.args(
+            debug=False, dump_all=False, format=None,
+            instance_data=self.instance_data, user_data='ud', vendor_data='vd',
+            list_keys=False, varname='v1.key_2')
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            self.assertEqual(0, query.handle_args('anyname', args))
+        self.assertEqual('value-2\n', m_stdout.getvalue())
+
+    def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
+        """Any standardized vars under v# are promoted as top-level aliases."""
+        write_file(
+            self.instance_data,
+            '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+            ' "top": "gun"}')
+        expected = dedent("""\
+            {
+             "top": "gun",
+             "userdata": "<redacted for non-root user> file:ud",
+             "v1": {
+              "v1_1": "val1.1"
+             },
+             "v1_1": "val1.1",
+             "v2": {
+              "v2_2": "val2.2"
+             },
+             "v2_2": "val2.2",
+             "vendordata": "<redacted for non-root user> file:vd"
+            }
+        """)
+        args = self.args(
+            debug=False, dump_all=True, format=None,
+            instance_data=self.instance_data, user_data='ud', vendor_data='vd',
+            list_keys=False, varname=None)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            self.assertEqual(0, query.handle_args('anyname', args))
+        self.assertEqual(expected, m_stdout.getvalue())
+
+    def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self):
+        """Sort all top-level keys when only --list-keys provided."""
+        write_file(
+            self.instance_data,
+            '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+            ' "top": "gun"}')
+        expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
+        args = self.args(
+            debug=False, dump_all=False, format=None,
+            instance_data=self.instance_data, list_keys=True, user_data='ud',
+            vendor_data='vd', varname=None)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            self.assertEqual(0, query.handle_args('anyname', args))
+        self.assertEqual(expected, m_stdout.getvalue())
+
+    def test_handle_args_list_keys_sorts_nested_keys_when_varname(self):
+        """Sort all nested keys of varname object when --list-keys provided."""
+        write_file(
+            self.instance_data,
+            '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
+            ' {"v2_2": "val2.2"}, "top": "gun"}')
+        expected = 'v1_1\nv1_2\n'
+        args = self.args(
+            debug=False, dump_all=False, format=None,
+            instance_data=self.instance_data, list_keys=True,
+            user_data='ud', vendor_data='vd', varname='v1')
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            self.assertEqual(0, query.handle_args('anyname', args))
+        self.assertEqual(expected, m_stdout.getvalue())
+
+    def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self):
+        """Raise an error when --list-keys and varname specify a non-list."""
+        write_file(
+            self.instance_data,
+            '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
+            '{"v2_2": "val2.2"}, "top": "gun"}')
+        expected_error = "ERROR: --list-keys provided but 'top' is not a dict"
+        args = self.args(
+            debug=False, dump_all=False, format=None,
+            instance_data=self.instance_data, list_keys=True, user_data='ud',
+            vendor_data='vd',  varname='top')
+        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+            with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+                self.assertEqual(1, query.handle_args('anyname', args))
+        self.assertEqual('', m_stdout.getvalue())
+        self.assertIn(expected_error, m_stderr.getvalue())
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index a604825..24a8ebe 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -104,6 +104,7 @@ def handle(name, cfg, cloud, log, args):
             'network_address', 'network_port', 'storage_backend',
             'storage_create_device', 'storage_create_loop',
             'storage_pool', 'trust_password')
+        util.subp(['lxd', 'waitready', '--timeout=300'])
         cmd = ['lxd', 'init', '--auto']
         for k in init_keys:
             if init_cfg.get(k):
@@ -260,7 +261,9 @@ def bridge_to_cmd(bridge_cfg):
 
 
 def _lxc(cmd):
-    env = {'LC_ALL': 'C'}
+    env = {'LC_ALL': 'C',
+           'HOME': os.environ.get('HOME', '/root'),
+           'USER': os.environ.get('USER', 'root')}
     util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
 
 
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index b8a48e8..ef618c2 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -74,11 +74,10 @@ class Distro(object):
     def install_packages(self, pkglist):
         raise NotImplementedError()
 
-    @abc.abstractmethod
     def _write_network(self, settings):
-        # In the future use the http://fedorahosted.org/netcf/
-        # to write this blob out in a distro format
-        raise NotImplementedError()
+        raise RuntimeError(
+            "Legacy function '_write_network' was called in distro '%s'.\n"
+            "_write_network_config needs implementation.\n" % self.name)
 
     def _write_network_config(self, settings):
         raise NotImplementedError()
@@ -144,7 +143,11 @@ class Distro(object):
         # this applies network where 'settings' is interfaces(5) style
         # it is obsolete compared to apply_network_config
         # Write it out
+
+        # pylint: disable=assignment-from-no-return
+        # We have implementations in arch, freebsd and gentoo still
         dev_names = self._write_network(settings)
+        # pylint: enable=assignment-from-no-return
         # Now try to bring them up
         if bring_up:
             return self._bring_up_interfaces(dev_names)
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 33cc0bf..d517fb8 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -109,11 +109,6 @@ class Distro(distros.Distro):
         self.update_package_sources()
         self.package_command('install', pkgs=pkglist)
 
-    def _write_network(self, settings):
-        # this is a legacy method, it will always write eni
-        util.write_file(self.network_conf_fn["eni"], settings)
-        return ['all']
-
     def _write_network_config(self, netconfig):
         _maybe_remove_legacy_eth0()
         return self._supported_write_network_config(netconfig)
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index 1ce1aa7..edfcd99 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -67,6 +67,10 @@
 #     }
 # }
 
+from cloudinit.net.network_state import (
+    net_prefix_to_ipv4_mask, mask_and_ipv4_to_bcast_addr)
+
+
 def translate_network(settings):
     # Get the standard cmd, args from the ubuntu format
     entries = []
@@ -134,6 +138,21 @@ def translate_network(settings):
                     val = info[k].strip().lower()
                     if val:
                         iface_info[k] = val
+            # handle static ip configurations using
+            # ipaddress/prefix-length format
+            if 'address' in iface_info:
+                if 'netmask' not in iface_info:
+                    # check if the address has a network prefix
+                    addr, _, prefix = iface_info['address'].partition('/')
+                    if prefix:
+                        iface_info['netmask'] = (
+                            net_prefix_to_ipv4_mask(prefix))
+                        iface_info['address'] = addr
+                        # if we set the netmask, we also can set the broadcast
+                        iface_info['broadcast'] = (
+                            mask_and_ipv4_to_bcast_addr(
+                                iface_info['netmask'], addr))
+
             # Name server info provided??
             if 'dns-nameservers' in info:
                 iface_info['dns-nameservers'] = info['dns-nameservers'].split()
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 1fe896a..1bfe047 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -16,7 +16,6 @@ from cloudinit import helpers
 from cloudinit import log as logging
 from cloudinit import util
 
-from cloudinit.distros import net_util
 from cloudinit.distros import rhel_util as rhutil
 from cloudinit.settings import PER_INSTANCE
 
@@ -172,52 +171,6 @@ class Distro(distros.Distro):
             conf.set_hostname(hostname)
             util.write_file(out_fn, str(conf), 0o644)
 
-    def _write_network(self, settings):
-        # Convert debian settings to ifcfg format
-        entries = net_util.translate_network(settings)
-        LOG.debug("Translated ubuntu style network settings %s into %s",
-                  settings, entries)
-        # Make the intermediate format as the suse format...
-        nameservers = []
-        searchservers = []
-        dev_names = entries.keys()
-        for (dev, info) in entries.items():
-            net_fn = self.network_script_tpl % (dev)
-            route_fn = self.route_conf_tpl % (dev)
-            mode = None
-            if info.get('auto', None):
-                mode = 'auto'
-            else:
-                mode = 'manual'
-            bootproto = info.get('bootproto', None)
-            gateway = info.get('gateway', None)
-            net_cfg = {
-                'BOOTPROTO': bootproto,
-                'BROADCAST': info.get('broadcast'),
-                'GATEWAY': gateway,
-                'IPADDR': info.get('address'),
-                'LLADDR': info.get('hwaddress'),
-                'NETMASK': info.get('netmask'),
-                'STARTMODE': mode,
-                'USERCONTROL': 'no'
-            }
-            if dev != 'lo':
-                net_cfg['ETHTOOL_OPTIONS'] = ''
-            else:
-                net_cfg['FIREWALL'] = 'no'
-            rhutil.update_sysconfig_file(net_fn, net_cfg, True)
-            if gateway and bootproto == 'static':
-                default_route = 'default    %s' % gateway
-                util.write_file(route_fn, default_route, 0o644)
-            if 'dns-nameservers' in info:
-                nameservers.extend(info['dns-nameservers'])
-            if 'dns-search' in info:
-                searchservers.extend(info['dns-search'])
-        if nameservers or searchservers:
-            rhutil.update_resolve_conf_file(self.resolve_conf_fn,
-                                            nameservers, searchservers)
-        return dev_names
-
     def _write_network_config(self, netconfig):
         return self._supported_write_network_config(netconfig)
 
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index ff51343..f55d96f 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -13,7 +13,6 @@ from cloudinit import helpers
 from cloudinit import log as logging
 from cloudinit import util
 
-from cloudinit.distros import net_util
 from cloudinit.distros import rhel_util
 from cloudinit.settings import PER_INSTANCE
 
@@ -65,54 +64,6 @@ class Distro(distros.Distro):
     def _write_network_config(self, netconfig):
         return self._supported_write_network_config(netconfig)
 
-    def _write_network(self, settings):
-        # TODO(harlowja) fix this... since this is the ubuntu format
-        entries = net_util.translate_network(settings)
-        LOG.debug("Translated ubuntu style network settings %s into %s",
-                  settings, entries)
-        # Make the intermediate format as the rhel format...
-        nameservers = []
-        searchservers = []
-        dev_names = entries.keys()
-        use_ipv6 = False
-        for (dev, info) in entries.items():
-            net_fn = self.network_script_tpl % (dev)
-            net_cfg = {
-                'DEVICE': dev,
-                'NETMASK': info.get('netmask'),
-                'IPADDR': info.get('address'),
-                'BOOTPROTO': info.get('bootproto'),
-                'GATEWAY': info.get('gateway'),
-                'BROADCAST': info.get('broadcast'),
-                'MACADDR': info.get('hwaddress'),
-                'ONBOOT': _make_sysconfig_bool(info.get('auto')),
-            }
-            if info.get('inet6'):
-                use_ipv6 = True
-                net_cfg.update({
-                    'IPV6INIT': _make_sysconfig_bool(True),
-                    'IPV6ADDR': info.get('ipv6').get('address'),
-                    'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'),
-                })
-            rhel_util.update_sysconfig_file(net_fn, net_cfg)
-            if 'dns-nameservers' in info:
-                nameservers.extend(info['dns-nameservers'])
-            if 'dns-search' in info:
-                searchservers.extend(info['dns-search'])
-        if nameservers or searchservers:
-            rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
-                                               nameservers, searchservers)
-        if dev_names:
-            net_cfg = {
-                'NETWORKING': _make_sysconfig_bool(True),
-            }
-            # If IPv6 interface present, enable ipv6 networking
-            if use_ipv6:
-                net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True)
-                net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False)
-            rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
-        return dev_names
-
     def apply_locale(self, locale, out_fn=None):
         if self.uses_systemd():
             if not out_fn:
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 3cc1fb1..dcd2645 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -239,6 +239,10 @@ class ConfigMerger(object):
             if cc_fn and os.path.isfile(cc_fn):
                 try:
                     i_cfgs.append(util.read_conf(cc_fn))
+                except PermissionError:
+                    LOG.debug(
+                        'Skipped loading cloud-config from %s due to'
+                        ' non-root.', cc_fn)
                 except Exception:
                     util.logexc(LOG, 'Failed loading of cloud-config from %s',
                                 cc_fn)
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 5e87bca..f83d368 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -569,6 +569,20 @@ def get_interface_mac(ifname):
     return read_sys_net_safe(ifname, path)
 
 
+def get_ib_interface_hwaddr(ifname, ethernet_format):
+    """Returns the string value of an Infiniband interface's hardware
+    address. If ethernet_format is True, an Ethernet MAC-style 6 byte
+    representation of the address will be returned.
+    """
+    # Type 32 is Infiniband.
+    if read_sys_net_safe(ifname, 'type') == '32':
+        mac = get_interface_mac(ifname)
+        if mac and ethernet_format:
+            # Use bytes 13-15 and 18-20 of the hardware address.
+            mac = mac[36:-14] + mac[51:]
+        return mac
+
+
 def get_interfaces_by_mac():
     """Build a dictionary of tuples {mac: name}.
 
@@ -580,6 +594,15 @@ def get_interfaces_by_mac():
                 "duplicate mac found! both '%s' and '%s' have mac '%s'" %
                 (name, ret[mac], mac))
         ret[mac] = name
+        # Try to get an Infiniband hardware address (in 6 byte Ethernet format)
+        # for the interface.
+        ib_mac = get_ib_interface_hwaddr(name, True)
+        if ib_mac:
+            if ib_mac in ret:
+                raise RuntimeError(
+                    "duplicate mac found! both '%s' and '%s' have mac '%s'" %
+                    (name, ret[ib_mac], ib_mac))
+            ret[ib_mac] = name
     return ret
 
 
@@ -607,6 +630,21 @@ def get_interfaces():
     return ret
 
 
+def get_ib_hwaddrs_by_interface():
+    """Build a dictionary mapping Infiniband interface names to their hardware
+    address."""
+    ret = {}
+    for name, _, _, _ in get_interfaces():
+        ib_mac = get_ib_interface_hwaddr(name, False)
+        if ib_mac:
+            if ib_mac in ret:
+                raise RuntimeError(
+                    "duplicate mac found! both '%s' and '%s' have mac '%s'" %
+                    (name, ret[ib_mac], ib_mac))
+            ret[name] = ib_mac
+    return ret
+
+
 class EphemeralIPv4Network(object):
     """Context manager which sets up temporary static network configuration.
 
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 72c803e..f76e508 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -483,6 +483,10 @@ class NetworkStateInterpreter(object):
 
         interfaces.update({iface['name']: iface})
 
+    @ensure_command_keys(['name'])
+    def handle_infiniband(self, command):
+        self.handle_physical(command)
+
     @ensure_command_keys(['address'])
     def handle_nameserver(self, command):
         dns = self._network_state.get('dns')
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 66e970e..9c16d3a 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -174,6 +174,7 @@ class NetInterface(ConfigMap):
         'ethernet': 'Ethernet',
         'bond': 'Bond',
         'bridge': 'Bridge',
+        'infiniband': 'InfiniBand',
     }
 
     def __init__(self, iface_name, base_sysconf_dir, templates,
@@ -569,6 +570,18 @@ class Renderer(renderer.Renderer):
             cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
 
     @classmethod
+    def _render_ib_interfaces(cls, network_state, iface_contents):
+        ib_filter = renderer.filter_by_type('infiniband')
+        for iface in network_state.iter_interfaces(ib_filter):
+            iface_name = iface['name']
+            iface_cfg = iface_contents[iface_name]
+            iface_cfg.kind = 'infiniband'
+            iface_subnets = iface.get("subnets", [])
+            route_cfg = iface_cfg.routes
+            cls._render_subnets(iface_cfg, iface_subnets)
+            cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
+    @classmethod
     def _render_sysconfig(cls, base_sysconf_dir, network_state,
                           templates=None):
         '''Given state, return /etc/sysconfig files + contents'''
@@ -586,6 +599,7 @@ class Renderer(renderer.Renderer):
         cls._render_bond_interfaces(network_state, iface_contents)
         cls._render_vlan_interfaces(network_state, iface_contents)
         cls._render_bridge_interfaces(network_state, iface_contents)
+        cls._render_ib_interfaces(network_state, iface_contents)
         contents = {}
         for iface_name, iface_cfg in iface_contents.items():
             if iface_cfg or iface_cfg.children:
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index ea367cb..b1ebaad 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -44,7 +44,7 @@ CFG_BUILTIN = {
     ],
     'def_log_file': '/var/log/cloud-init.log',
     'log_cfgs': [],
-    'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
+    'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'],
     'system_info': {
         'paths': {
             'cloud_dir': '/var/lib/cloud',
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 4cb2897..664dc4b 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -196,7 +196,7 @@ def on_first_boot(data, distro=None, network=True):
         net_conf = data.get("network_config", '')
         if net_conf and distro:
             LOG.warning("Updating network interfaces from config drive")
-            distro.apply_network(net_conf)
+            distro.apply_network_config(eni.convert_eni_data(net_conf))
     write_injected_files(data.get('files'))
 
 
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index a775f1a..5ac9882 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -38,8 +38,17 @@ DEP_FILESYSTEM = "FILESYSTEM"
 DEP_NETWORK = "NETWORK"
 DS_PREFIX = 'DataSource'
 
-# File in which instance meta-data, user-data and vendor-data is written
+EXPERIMENTAL_TEXT = (
+    "EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
+    " key may change in subsequent releases of cloud-init.")
+
+
+# File in which publicly available instance meta-data is written
+# security-sensitive key values are redacted from this world-readable file
 INSTANCE_JSON_FILE = 'instance-data.json'
+# security-sensitive key values are present in this root-readable file
+INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
+REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
 
 # Key which can be provide a cloud's official product name to cloud-init
 METADATA_CLOUD_NAME_KEY = 'cloud-name'
@@ -58,7 +67,7 @@ class InvalidMetaDataException(Exception):
     pass
 
 
-def process_instance_metadata(metadata, key_path=''):
+def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
     """Process all instance metadata cleaning it up for persisting as json.
 
     Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list
@@ -67,22 +76,46 @@ def process_instance_metadata(metadata, key_path=''):
     """
     md_copy = copy.deepcopy(metadata)
     md_copy['base64_encoded_keys'] = []
+    md_copy['sensitive_keys'] = []
     for key, val in metadata.items():
         if key_path:
             sub_key_path = key_path + '/' + key
         else:
             sub_key_path = key
+        if key in sensitive_keys or sub_key_path in sensitive_keys:
+            md_copy['sensitive_keys'].append(sub_key_path)
         if isinstance(val, str) and val.startswith('ci-b64:'):
             md_copy['base64_encoded_keys'].append(sub_key_path)
             md_copy[key] = val.replace('ci-b64:', '')
         if isinstance(val, dict):
-            return_val = process_instance_metadata(val, sub_key_path)
+            return_val = process_instance_metadata(
+                val, sub_key_path, sensitive_keys)
             md_copy['base64_encoded_keys'].extend(
                 return_val.pop('base64_encoded_keys'))
+            md_copy['sensitive_keys'].extend(
+                return_val.pop('sensitive_keys'))
             md_copy[key] = return_val
     return md_copy
 
 
+def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
+    """Redact any sensitive keys from the provided metadata dictionary.
+
+    Replace any key values listed in 'sensitive_keys' with redact_value.
+    """
+    if not metadata.get('sensitive_keys', []):
+        return metadata
+    md_copy = copy.deepcopy(metadata)
+    for key_path in metadata.get('sensitive_keys'):
+        path_parts = key_path.split('/')
+        obj = md_copy
+        for path in path_parts:
+            if isinstance(obj[path], dict) and path != path_parts[-1]:
+                obj = obj[path]
+        obj[path] = redact_value
+    return md_copy
+
+
 URLParams = namedtuple(
     'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
 
@@ -127,6 +160,10 @@ class DataSource(object):
 
     _dirty_cache = False
 
+    # N-tuple of keypaths or keynames to redact from instance-data.json for
+    # non-root users
+    sensitive_metadata_keys = ('security-credentials',)
+
     def __init__(self, sys_cfg, distro, paths, ud_proc=None):
         self.sys_cfg = sys_cfg
         self.distro = distro
@@ -152,12 +189,24 @@ class DataSource(object):
 
     def _get_standardized_metadata(self):
         """Return a dictionary of standardized metadata keys."""
-        return {'v1': {
-            'local-hostname': self.get_hostname(),
-            'instance-id': self.get_instance_id(),
-            'cloud-name': self.cloud_name,
-            'region': self.region,
-            'availability-zone': self.availability_zone}}
+        local_hostname = self.get_hostname()
+        instance_id = self.get_instance_id()
+        availability_zone = self.availability_zone
+        cloud_name = self.cloud_name
+        # When adding new standard keys prefer underscore-delimited instead
+        # of hyphen-delimited to support simple variable references in jinja
+        # templates.
+        return {
+            'v1': {
+                'availability-zone': availability_zone,
+                'availability_zone': availability_zone,
+                'cloud-name': cloud_name,
+                'cloud_name': cloud_name,
+                'instance-id': instance_id,
+                'instance_id': instance_id,
+                'local-hostname': local_hostname,
+                'local_hostname': local_hostname,
+                'region': self.region}}
 
     def clear_cached_attrs(self, attr_defaults=()):
         """Reset any cached metadata attributes to datasource defaults.
@@ -199,10 +248,8 @@ class DataSource(object):
         @return True on successful write, False otherwise.
         """
         instance_data = {
-            'ds': {
-                'meta_data': self.metadata,
-                'user_data': self.get_userdata_raw(),
-                'vendor_data': self.get_vendordata_raw()}}
+            'ds': {'_doc': EXPERIMENTAL_TEXT,
+                   'meta_data': self.metadata}}
         if hasattr(self, 'network_json'):
             network_json = getattr(self, 'network_json')
             if network_json != UNSET:
@@ -217,7 +264,9 @@ class DataSource(object):
             # Process content base64encoding unserializable values
             content = util.json_dumps(instance_data)
             # Strip base64: prefix and set base64_encoded_keys list.
-            processed_data = process_instance_metadata(json.loads(content))
+            processed_data = process_instance_metadata(
+                json.loads(content),
+                sensitive_keys=self.sensitive_metadata_keys)
         except TypeError as e:
             LOG.warning('Error persisting instance-data.json: %s', str(e))
             return False
@@ -225,7 +274,11 @@ class DataSource(object):
             LOG.warning('Error persisting instance-data.json: %s', str(e))
             return False
         json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
-        write_json(json_file, processed_data, mode=0o600)
+        write_json(json_file, processed_data)  # World readable
+        json_sensitive_file = os.path.join(self.paths.run_dir,
+                                           INSTANCE_JSON_SENSITIVE_FILE)
+        write_json(json_sensitive_file,
+                   redact_sensitive_keys(processed_data), mode=0o600)
         return True
 
     def _get_data(self):
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 76a6e31..9c29cea 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -675,6 +675,17 @@ def convert_net_json(network_json=None, known_macs=None):
             else:
                 cfg[key] = fmt % link_id_info[target]['name']
 
+    # Infiniband interfaces may be referenced in network_data.json by a 6 byte
+    # Ethernet MAC-style address, and we use that address to look up the
+    # interface name above. Now ensure that the hardware address is set to the
+    # full 20 byte address.
+    ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
+    if ib_known_hwaddrs:
+        for cfg in config:
+            if cfg['name'] in ib_known_hwaddrs:
+                cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
+                cfg['type'] = 'infiniband'
+
     for service in services:
         cfg = service
         cfg.update({'type': 'nameserver'})
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 8299af2..8082019 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -1,5 +1,6 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
+import copy
 import inspect
 import os
 import six
@@ -9,7 +10,8 @@ from cloudinit.event import EventType
 from cloudinit.helpers import Paths
 from cloudinit import importer
 from cloudinit.sources import (
-    INSTANCE_JSON_FILE, DataSource, UNSET)
+    EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
+    REDACT_SENSITIVE_VALUE, UNSET, DataSource, redact_sensitive_keys)
 from cloudinit.tests.helpers import CiTestCase, skipIf, mock
 from cloudinit.user_data import UserDataProcessor
 from cloudinit import util
@@ -20,20 +22,24 @@ class DataSourceTestSubclassNet(DataSource):
     dsname = 'MyTestSubclass'
     url_max_wait = 55
 
-    def __init__(self, sys_cfg, distro, paths, custom_userdata=None,
-                 get_data_retval=True):
+    def __init__(self, sys_cfg, distro, paths, custom_metadata=None,
+                 custom_userdata=None, get_data_retval=True):
         super(DataSourceTestSubclassNet, self).__init__(
             sys_cfg, distro, paths)
         self._custom_userdata = custom_userdata
+        self._custom_metadata = custom_metadata
         self._get_data_retval = get_data_retval
 
     def _get_cloud_name(self):
         return 'SubclassCloudName'
 
     def _get_data(self):
-        self.metadata = {'availability_zone': 'myaz',
-                         'local-hostname': 'test-subclass-hostname',
-                         'region': 'myregion'}
+        if self._custom_metadata:
+            self.metadata = self._custom_metadata
+        else:
+            self.metadata = {'availability_zone': 'myaz',
+                             'local-hostname': 'test-subclass-hostname',
+                             'region': 'myregion'}
         if self._custom_userdata:
             self.userdata_raw = self._custom_userdata
         else:
@@ -278,7 +284,7 @@ class TestDataSource(CiTestCase):
             os.path.exists(json_file), 'Found unexpected file %s' % json_file)
 
     def test_get_data_writes_json_instance_data_on_success(self):
-        """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""
+        """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
         tmp = self.tmp_dir()
         datasource = DataSourceTestSubclassNet(
             self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
@@ -287,40 +293,92 @@ class TestDataSource(CiTestCase):
         content = util.load_file(json_file)
         expected = {
             'base64_encoded_keys': [],
+            'sensitive_keys': [],
             'v1': {
                 'availability-zone': 'myaz',
+                'availability_zone': 'myaz',
                 'cloud-name': 'subclasscloudname',
+                'cloud_name': 'subclasscloudname',
                 'instance-id': 'iid-datasource',
+                'instance_id': 'iid-datasource',
                 'local-hostname': 'test-subclass-hostname',
+                'local_hostname': 'test-subclass-hostname',
                 'region': 'myregion'},
             'ds': {
+                '_doc': EXPERIMENTAL_TEXT,
                 'meta_data': {'availability_zone': 'myaz',
                               'local-hostname': 'test-subclass-hostname',
-                              'region': 'myregion'},
-                'user_data': 'userdata_raw',
-                'vendor_data': 'vendordata_raw'}}
-        self.maxDiff = None
+                              'region': 'myregion'}}}
         self.assertEqual(expected, util.load_json(content))
         file_stat = os.stat(json_file)
+        self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+        self.assertEqual(expected, util.load_json(content))
+
+    def test_get_data_writes_json_instance_data_sensitive(self):
+        """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+            custom_metadata={
+                'availability_zone': 'myaz',
+                'local-hostname': 'test-subclass-hostname',
+                'region': 'myregion',
+                'some': {'security-credentials': {
+                    'cred1': 'sekret', 'cred2': 'othersekret'}}})
+        self.assertEqual(
+            ('security-credentials',), datasource.sensitive_metadata_keys)
+        datasource.get_data()
+        json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+        sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+        redacted = util.load_json(util.load_file(json_file))
+        self.assertEqual(
+            {'cred1': 'sekret', 'cred2': 'othersekret'},
+            redacted['ds']['meta_data']['some']['security-credentials'])
+        content = util.load_file(sensitive_json_file)
+        expected = {
+            'base64_encoded_keys': [],
+            'sensitive_keys': ['ds/meta_data/some/security-credentials'],
+            'v1': {
+                'availability-zone': 'myaz',
+                'availability_zone': 'myaz',
+                'cloud-name': 'subclasscloudname',
+                'cloud_name': 'subclasscloudname',
+                'instance-id': 'iid-datasource',
+                'instance_id': 'iid-datasource',
+                'local-hostname': 'test-subclass-hostname',
+                'local_hostname': 'test-subclass-hostname',
+                'region': 'myregion'},
+            'ds': {
+                '_doc': EXPERIMENTAL_TEXT,
+                'meta_data': {
+                    'availability_zone': 'myaz',
+                    'local-hostname': 'test-subclass-hostname',
+                    'region': 'myregion',
+                    'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
+        }
+        self.maxDiff = None
+        self.assertEqual(expected, util.load_json(content))
+        file_stat = os.stat(sensitive_json_file)
         self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
+        self.assertEqual(expected, util.load_json(content))
 
     def test_get_data_handles_redacted_unserializable_content(self):
         """get_data warns unserializable content in INSTANCE_JSON_FILE."""
         tmp = self.tmp_dir()
         datasource = DataSourceTestSubclassNet(
             self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
-            custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
+            custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
         datasource.get_data()
         json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
         content = util.load_file(json_file)
-        expected_userdata = {
+        expected_metadata = {
             'key1': 'val1',
             'key2': {
                 'key2.1': "Warning: redacted unserializable type <class"
                           " 'cloudinit.helpers.Paths'>"}}
         instance_json = util.load_json(content)
         self.assertEqual(
-            expected_userdata, instance_json['ds']['user_data'])
+            expected_metadata, instance_json['ds']['meta_data'])
 
     def test_persist_instance_data_writes_ec2_metadata_when_set(self):
         """When ec2_metadata class attribute is set, persist to json."""
@@ -361,17 +419,17 @@ class TestDataSource(CiTestCase):
         tmp = self.tmp_dir()
         datasource = DataSourceTestSubclassNet(
             self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
-            custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+            custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
         self.assertTrue(datasource.get_data())
         json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
         content = util.load_file(json_file)
         instance_json = util.load_json(content)
-        self.assertEqual(
-            ['ds/user_data/key2/key2.1'],
+        self.assertItemsEqual(
+            ['ds/meta_data/key2/key2.1'],
             instance_json['base64_encoded_keys'])
         self.assertEqual(
             {'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
-            instance_json['ds']['user_data'])
+            instance_json['ds']['meta_data'])
 
     @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
     def test_get_data_handles_bytes_values(self):
@@ -379,7 +437,7 @@ class TestDataSource(CiTestCase):
         tmp = self.tmp_dir()
         datasource = DataSourceTestSubclassNet(
             self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
-            custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+            custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
         self.assertTrue(datasource.get_data())
         json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
         content = util.load_file(json_file)
@@ -387,7 +445,7 @@ class TestDataSource(CiTestCase):
         self.assertEqual([], instance_json['base64_encoded_keys'])
         self.assertEqual(
             {'key1': 'val1', 'key2': {'key2.1': '\x123'}},
-            instance_json['ds']['user_data'])
+            instance_json['ds']['meta_data'])
 
     @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
     def test_non_utf8_encoding_logs_warning(self):
@@ -395,7 +453,7 @@ class TestDataSource(CiTestCase):
         tmp = self.tmp_dir()
         datasource = DataSourceTestSubclassNet(
             self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
-            custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
+            custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
         self.assertTrue(datasource.get_data())
         json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
         self.assertFalse(os.path.exists(json_file))
@@ -509,4 +567,36 @@ class TestDataSource(CiTestCase):
             self.logs.getvalue())
 
 
+class TestRedactSensitiveData(CiTestCase):
+
+    def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
+        """When sensitive_keys is absent or empty from metadata do nothing."""
+        md = {'my': 'data'}
+        self.assertEqual(
+            md, redact_sensitive_keys(md, redact_value='redacted'))
+        md['sensitive_keys'] = []
+        self.assertEqual(
+            md, redact_sensitive_keys(md, redact_value='redacted'))
+
+    def test_redact_sensitive_data_redacts_exact_match_name(self):
+        """Only exact matched sensitive_keys are redacted from metadata."""
+        md = {'sensitive_keys': ['md/secure'],
+              'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+        secure_md = copy.deepcopy(md)
+        secure_md['md']['secure'] = 'redacted'
+        self.assertEqual(
+            secure_md,
+            redact_sensitive_keys(md, redact_value='redacted'))
+
+    def test_redact_sensitive_data_does_redacts_with_default_string(self):
+        """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
+        md = {'sensitive_keys': ['md/secure'],
+              'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+        secure_md = copy.deepcopy(md)
+        secure_md['md']['secure'] = 'redacted for non-root user'
+        self.assertEqual(
+            secure_md,
+            redact_sensitive_keys(md))
+
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index ef5c699..8a06412 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -88,7 +88,7 @@ class Init(object):
             # from whatever it was to a new set...
             if self.datasource is not NULL_DATA_SOURCE:
                 self.datasource.distro = self._distro
-                self.datasource.sys_cfg = system_config
+                self.datasource.sys_cfg = self.cfg
         return self._distro
 
     @property
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 3b60fc4..844a02e 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
 
-__VERSION__ = "18.3"
+__VERSION__ = "18.4"
 _PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
 
 FEATURES = [
diff --git a/debian/changelog b/debian/changelog
index 86f8a53..8dd1c4e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,27 @@
+cloud-init (18.4-0ubuntu1) cosmic; urgency=medium
+
+  * New upstream release.
+    - release 18.4
+    - tests: allow skipping an entire cloud_test without running.
+    - tests: disable lxd tests on cosmic
+    - cii-tests: use unittest2.SkipTest in ntp_chrony due to new deps
+    - lxd: adjust to snap installed lxd.
+    - docs: surface experimental doc in instance-data.json
+    - tests: fix ec2 integration tests. process meta_data instead of meta-data
+    - Add support for Infiniband network interfaces (IPoIB). [Mark Goddard]
+    - cli: add cloud-init query subcommand to query instance metadata
+    - tools/tox-venv: update for new features.
+    - pylint: ignore warning assignment-from-no-return for _write_network
+    - stages: Fix bug causing datasource to have incorrect sys_cfg.
+    - Remove dead-code _write_network distro implementations.
+    - net_util: ensure static configs have netmask in translate_network result
+      [Thomas Berger]
+    - Fall back to root:root on syslog permissions if other options fail.
+      [Robert Schweikert]
+    - tests: Add mock for util.get_hostname. [Robert Schweikert]
+
+ -- Chad Smith <chad.smith@xxxxxxxxxxxxx>  Tue, 02 Oct 2018 19:55:23 -0600
+
 cloud-init (18.3-46-gbb60f61b-0ubuntu1) cosmic; urgency=medium
 
   * New upstream snapshot.
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index de67f36..20a99a3 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -31,6 +31,7 @@ initialization of a cloud instance.
    topics/capabilities.rst
    topics/availability.rst
    topics/format.rst
+   topics/instancedata.rst
    topics/dir_layout.rst
    topics/examples.rst
    topics/boot.rst
diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst
index 2d8e253..0d8b894 100644
--- a/doc/rtd/topics/capabilities.rst
+++ b/doc/rtd/topics/capabilities.rst
@@ -18,7 +18,7 @@ User configurability
 
     User-data can be given by the user at instance launch time. See
     :ref:`user_data_formats` for acceptable user-data content.
-    
+
 
 This is done via the ``--user-data`` or ``--user-data-file`` argument to
 ec2-run-instances for example.
@@ -53,10 +53,9 @@ system:
 
   % cloud-init --help
   usage: cloud-init [-h] [--version] [--file FILES]
-
                     [--debug] [--force]
-                    {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
-                    ...
+                    {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
+                                                         ...
 
   optional arguments:
     -h, --help            show this help message and exit
@@ -68,17 +67,19 @@ system:
                           your own risk)
 
   Subcommands:
-    {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
+    {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
       init                initializes cloud-init and performs initial modules
       modules             activates modules using a given configuration key
       single              run a single module
+      query               Query instance metadata from the command line
       dhclient-hook       run the dhclient hookto record network info
       features            list defined features
       analyze             Devel tool: Analyze cloud-init logs and data
       devel               Run development tools
       collect-logs        Collect and tar all cloud-init debug info
-      clean               Remove logs and artifacts so cloud-init can re-run.
-      status              Report cloud-init status or wait on completion.
+      clean               Remove logs and artifacts so cloud-init can re-run
+      status              Report cloud-init status or wait on completion
+
 
 CLI Subcommand details
 ======================
@@ -104,8 +105,8 @@ cloud-init status
 Report whether cloud-init is running, done, disabled or errored. Exits
 non-zero if an error is detected in cloud-init.
 
- * **--long**: Detailed status information.
- * **--wait**: Block until cloud-init completes.
+* **--long**: Detailed status information.
+* **--wait**: Block until cloud-init completes.
 
 .. code-block:: shell-session
 
@@ -143,6 +144,68 @@ Logs collected are:
  * journalctl output
  * /var/lib/cloud/instance/user-data.txt
 
+.. _cli_query:
+
+cloud-init query
+------------------
+Query standardized cloud instance metadata crawled by cloud-init and stored
+in ``/run/cloud-init/instance-data.json``. This is a convenience command-line
+interface to reference any cached configuration metadata that cloud-init
+crawls when booting the instance. See :ref:`instance_metadata` for more info.
+
+* **--all**: Dump all available instance data as json which can be queried.
+* **--instance-data**: Optional path to a different instance-data.json file to
+  source for queries.
+* **--list-keys**: List available query keys from cached instance data.
+
+.. code-block:: shell-session
+
+  # List all top-level query keys available (includes standardized aliases)
+  % cloud-init query --list-keys
+  availability_zone
+  base64_encoded_keys
+  cloud_name
+  ds
+  instance_id
+  local_hostname
+  region
+  v1
+
+* **<varname>**: A dot-delimited variable path into the instance-data.json
+   object.
+
+.. code-block:: shell-session
+
+  # Query cloud-init standardized metadata on any cloud
+  % cloud-init query v1.cloud_name
+  aws  # or openstack, azure, gce etc.
+
+  # Any standardized instance-data under a <v#> key is aliased as a top-level
+  # key for convenience.
+  % cloud-init query cloud_name
+  aws  # or openstack, azure, gce etc.
+
+  # Query datasource-specific metadata on EC2
+  % cloud-init query ds.meta_data.public_ipv4
+
+* **--format** A string that will use jinja-template syntax to render a
+  string, replacing any referenced instance-data variables.
+
+.. code-block:: shell-session
+
+  # Generate a custom hostname fqdn based on instance-id, cloud and region
+  % cloud-init query --format 'custom-{{instance_id}}.{{region}}.{{v1.cloud_name}}.com'
+  custom-i-0e91f69987f37ec74.us-east-2.aws.com
+
+
+.. note::
+  The standardized instance data keys under **v#** are guaranteed not to change
+  behavior or format. If using top-level convenience aliases for any
+  standardized instance data keys, the most recent value (highest **v#**) of that key
+  name is what is reported as the top-level value. So these aliases act as a
+  'latest'.
+
+
 .. _cli_analyze:
 
 cloud-init analyze
@@ -150,10 +213,10 @@ cloud-init analyze
 Get detailed reports of where cloud-init spends most of its time. See
 :ref:`boot_time_analysis` for more info.
 
- * **blame** Report ordered by most costly operations.
- * **dump** Machine-readable JSON dump of all cloud-init tracked events.
- * **show** show time-ordered report of the cost of operations during each
-   boot stage.
+* **blame** Report ordered by most costly operations.
+* **dump** Machine-readable JSON dump of all cloud-init tracked events.
+* **show** show time-ordered report of the cost of operations during each
+  boot stage.
 
 .. _cli_devel:
 
@@ -182,8 +245,8 @@ cloud-init clean
 Remove cloud-init artifacts from /var/lib/cloud and optionally reboot the
 machine to so cloud-init re-runs all stages as it did on first boot.
 
- * **--logs**: Optionally remove /var/log/cloud-init*log files.
- * **--reboot**: Reboot the system after removing artifacts.
+* **--logs**: Optionally remove /var/log/cloud-init*log files.
+* **--reboot**: Reboot the system after removing artifacts.
 
 .. _cli_init:
 
@@ -195,7 +258,7 @@ Can be run on the commandline, but is generally gated to run only once
 due to semaphores in **/var/lib/cloud/instance/sem/** and
 **/var/lib/cloud/sem**.
 
- * **--local**: Run *init-local* stage instead of *init*.
+* **--local**: Run *init-local* stage instead of *init*.
 
 .. _cli_modules:
 
@@ -210,8 +273,8 @@ declared to run in various boot stages in the file
 commandline, but each module is gated to run only once due to semaphores
 in ``/var/lib/cloud/``.
 
- * **--mode (init|config|final)**: Run *modules:init*, *modules:config* or
-   *modules:final* cloud-init stages. See :ref:`boot_stages` for more info.
+* **--mode (init|config|final)**: Run *modules:init*, *modules:config* or
+  *modules:final* cloud-init stages. See :ref:`boot_stages` for more info.
 
 .. _cli_single:
 
@@ -221,9 +284,9 @@ Attempt to run a single named cloud config module.  The following example
 re-runs the cc_set_hostname module ignoring the module default frequency
 of once-per-instance:
 
- * **--name**: The cloud-config module name to run
- * **--frequency**: Optionally override the declared module frequency
-   with one of (always|once-per-instance|once)
+* **--name**: The cloud-config module name to run
+* **--frequency**: Optionally override the declared module frequency
+  with one of (always|once-per-instance|once)
 
 .. code-block:: shell-session
 
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 14432e6..e34f145 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -17,146 +17,10 @@ own way) internally a datasource abstract class was created to allow for a
 single way to access the different cloud systems methods to provide this data
 through the typical usage of subclasses.
 
-
-.. _instance_metadata:
-
-instance-data
--------------
-For reference, cloud-init stores all the metadata, vendordata and userdata
-provided by a cloud in a json blob at ``/run/cloud-init/instance-data.json``.
-While the json contains datasource-specific keys and names, cloud-init will
-maintain a minimal set of standardized keys that will remain stable on any
-cloud. Standardized instance-data keys will be present under a "v1" key.
-Any datasource metadata cloud-init consumes will all be present under the
-"ds" key.
-
-Below is an instance-data.json example from an OpenStack instance:
-
-.. sourcecode:: json
-
-  {
-   "base64-encoded-keys": [
-    "ds/meta-data/random_seed",
-    "ds/user-data"
-   ],
-   "ds": {
-    "ec2_metadata": {
-     "ami-id": "ami-0000032f",
-     "ami-launch-index": "0",
-     "ami-manifest-path": "FIXME",
-     "block-device-mapping": {
-      "ami": "vda",
-      "ephemeral0": "/dev/vdb",
-      "root": "/dev/vda"
-     },
-     "hostname": "xenial-test.novalocal",
-     "instance-action": "none",
-     "instance-id": "i-0006e030",
-     "instance-type": "m1.small",
-     "local-hostname": "xenial-test.novalocal",
-     "local-ipv4": "10.5.0.6",
-     "placement": {
-      "availability-zone": "None"
-     },
-     "public-hostname": "xenial-test.novalocal",
-     "public-ipv4": "10.245.162.145",
-     "reservation-id": "r-fxm623oa",
-     "security-groups": "default"
-    },
-    "meta-data": {
-     "availability_zone": null,
-     "devices": [],
-     "hostname": "xenial-test.novalocal",
-     "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0",
-     "launch_index": 0,
-     "local-hostname": "xenial-test.novalocal",
-     "name": "xenial-test",
-     "project_id": "e0eb2d2538814...",
-     "random_seed": "A6yPN...",
-     "uuid": "3e39d278-0644-4728-9479-678f92..."
-    },
-    "network_json": {
-     "links": [
-      {
-       "ethernet_mac_address": "fa:16:3e:7d:74:9b",
-       "id": "tap9ca524d5-6e",
-       "mtu": 8958,
-       "type": "ovs",
-       "vif_id": "9ca524d5-6e5a-4809-936a-6901..."
-      }
-     ],
-     "networks": [
-      {
-       "id": "network0",
-       "link": "tap9ca524d5-6e",
-       "network_id": "c6adfc18-9753-42eb-b3ea-18b57e6b837f",
-       "type": "ipv4_dhcp"
-      }
-     ],
-     "services": [
-      {
-       "address": "10.10.160.2",
-       "type": "dns"
-      }
-     ]
-    },
-    "user-data": "I2Nsb3VkLWNvbmZpZ...",
-    "vendor-data": null
-   },
-   "v1": {
-    "availability-zone": null,
-    "cloud-name": "openstack",
-    "instance-id": "3e39d278-0644-4728-9479-678f9212d8f0",
-    "local-hostname": "xenial-test",
-    "region": null
-   }
-  }
-
- 
-As of cloud-init v. 18.4, any values present in
-``/run/cloud-init/instance-data.json`` can be used in cloud-init user data
-scripts or cloud config data. This allows consumers to use cloud-init's
-vendor-neutral, standardized metadata keys as well as datasource-specific
-content for any scripts or cloud-config modules they are using.
-
-To use instance-data.json values in scripts and **#config-config** files the
-user-data will need to contain the following header as the first line **## template: jinja**. Cloud-init will source all variables defined in
-``/run/cloud-init/instance-data.json`` and allow scripts or cloud-config files 
-to reference those paths. Below are two examples::
-
- * Cloud config calling home with the ec2 public hostname and avaliability-zone
-    ```
-    ## template: jinja
-    #cloud-config
-    runcmd:
-        - echo 'EC2 public hostname allocated to instance: {{ ds.meta_data.public_hostname }}' > /tmp/instance_metadata
-        - echo 'EC2 avaiability zone: {{ v1.availability_zone }}' >> /tmp/instance_metadata 
-        - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}", "availability-zone": "{{ v1.availability_zone }}"}'  https://example.com.com
-    ```
-
- * Custom user script performing different operations based on region
-    ```
-    ## template: jinja
-    #!/bin/bash
-    {% if v1.region == 'us-east-2' -%}
-    echo 'Installing custom proxies for {{ v1.region }}
-    sudo apt-get install my-xtra-fast-stack
-    {%- endif %}
-    ...
-
-    ```
-
-.. note::
-  Trying to reference jinja variables that don't exist in
-  instance-data.json will result in warnings in ``/var/log/cloud-init.log``
-  and the following string in your rendered user-data:
-  ``CI_MISSING_JINJA_VAR/<your_varname>``.
-  
-.. note::
-  To save time designing your user-data for a specific cloud's
-  instance-data.json, use the 'render' cloud-init command on an
-  instance booted on your favorite cloud. See :ref:`cli_devel` for more
-  information.
+Any metadata processed by cloud-init's datasources is persisted as
+``/run/cloud-init/instance-data.json``. Cloud-init provides tooling
+to quickly introspect some of that data. See :ref:`instance_metadata` for
+more information.
 
 
 Datasource API
@@ -196,14 +60,14 @@ The current interface that a datasource object must provide is the following:
     # or does not exist)
     def device_name_to_device(self, name)
 
-    # gets the locale string this instance should be applying 
+    # gets the locale string this instance should be applying
     # which typically used to adjust the instances locale settings files
     def get_locale(self)
 
     @property
     def availability_zone(self)
 
-    # gets the instance id that was assigned to this instance by the 
+    # gets the instance id that was assigned to this instance by the
     # cloud provider or when said instance id does not exist in the backing
     # metadata this will return 'iid-datasource'
     def get_instance_id(self)
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
new file mode 100644
index 0000000..634e180
--- /dev/null
+++ b/doc/rtd/topics/instancedata.rst
@@ -0,0 +1,297 @@
+.. _instance_metadata:
+
+*****************
+Instance Metadata
+*****************
+
+What is instance data?
+========================
+
+Instance data is the collection of all configuration data that cloud-init
+processes to configure the instance. This configuration typically
+comes from any number of sources:
+
+* cloud-provided metadata services (aka metadata)
+* custom config-drive attached to the instance
+* cloud-config seed files in the booted cloud image or distribution
+* vendordata provided from files or cloud metadata services
+* userdata provided at instance creation
+
+Each cloud provider presents unique configuration metadata in different
+formats to the instance. Cloud-init provides a cache of any crawled metadata
+as well as a versioned set of standardized instance data keys which it makes
+available on all platforms.
+
+Cloud-init produces a simple json object in
+``/run/cloud-init/instance-data.json`` which represents a standardized and
+versioned representation of the metadata it consumes during initial boot. The
+intent is to provide the following benefits to users or scripts on any system
+deployed with cloud-init:
+
+* simple static object to query to obtain an instance's metadata
+* speed: avoid costly network transactions for metadata that is already cached
+  on the filesystem
+* reduce need to recrawl metadata services for static metadata that is already
+  cached
+* leverage cloud-init's best practices for crawling cloud-metadata services
+* avoid rolling unique metadata crawlers on each cloud platform to get
+  metadata configuration values
+
+Cloud-init stores any instance data processed in the following files:
+
+* ``/run/cloud-init/instance-data.json``: world-readable json containing
+  standardized keys, sensitive keys redacted
+* ``/run/cloud-init/instance-data-sensitive.json``: root-readable unredacted
+  json blob
+* ``/var/lib/cloud/instance/user-data.txt``: root-readable sensitive raw
+  userdata
+* ``/var/lib/cloud/instance/vendor-data.txt``: root-readable sensitive raw
+  vendordata
+
+Cloud-init redacts any security sensitive content from instance-data.json,
+stores ``/run/cloud-init/instance-data.json`` as a world-readable json file.
+Because user-data and vendor-data can contain passwords both of these files
+are readonly for *root* as well. The *root* user can also read
+``/run/cloud-init/instance-data-sensitive.json`` which is all instance data
+from instance-data.json as well as unredacted sensitive content.
+
+
+Format of instance-data.json
+============================
+
+The instance-data.json and instance-data-sensitive.json files are well-formed
+JSON and record the set of keys and values for any metadata processed by
+cloud-init. Cloud-init standardizes the format for this content so that it
+can be generalized across different cloud platforms.
+
+There are three basic top-level keys:
+
+* **base64_encoded_keys**: A list of forward-slash delimited key paths into
+  the instance-data.json object whose value is base64encoded for json
+  compatibility. Values at these paths should be decoded to get the original
+  value.
+
+* **sensitive_keys**: A list of forward-slash delimited key paths into
+  the instance-data.json object whose value is considered by the datasource as
+  'security sensitive'. Only the keys listed here will be redacted from
+  instance-data.json for non-root users.
+
+* **ds**: Datasource-specific metadata crawled for the specific cloud
+  platform. It should closely represent the structure of the cloud metadata
+  crawled. The structure of content and details provided are entirely
+  cloud-dependent. Mileage will vary depending on what the cloud exposes.
+  The content exposed under the 'ds' key is currently **experimental** and
+  expected to change slightly in the upcoming cloud-init release.
+
+* **v1**: Standardized cloud-init metadata keys, these keys are guaranteed to
+  exist on all cloud platforms. They will also retain their current behavior
+  and format and will be carried forward even if cloud-init introduces a new
+  version of standardized keys with **v2**.
+
+The standardized keys present:
+
++----------------------+-----------------------------------------------+---------------------------+
+|  Key path            | Description                                   | Examples                  |
++======================+===============================================+===========================+
+| v1.cloud_name        | The name of the cloud provided by metadata    | aws, openstack, azure,    |
+|                      | key 'cloud-name' or the cloud-init datasource | configdrive, nocloud,     |
+|                      | name which was discovered.                    | ovf, etc.                 |
++----------------------+-----------------------------------------------+---------------------------+
+| v1.instance_id       | Unique instance_id allocated by the cloud     | i-<somehash>              |
++----------------------+-----------------------------------------------+---------------------------+
+| v1.local_hostname    | The internal or local hostname of the system  | ip-10-41-41-70,           |
+|                      |                                               | <user-provided-hostname>  |
++----------------------+-----------------------------------------------+---------------------------+
+| v1.region            | The physical region/datacenter in which the   | us-east-2                 |
+|                      | instance is deployed                          |                           |
++----------------------+-----------------------------------------------+---------------------------+
+| v1.availability_zone | The physical availability zone in which the   | us-east-2b, nova, null    |
+|                      | instance is deployed                          |                           |
++----------------------+-----------------------------------------------+---------------------------+
+
+
+Below is an example of ``/run/cloud-init/instance-data.json`` on an EC2
+instance:
+
+.. sourcecode:: json
+
+  {
+   "base64_encoded_keys": [],
+   "sensitive_keys": [],
+   "ds": {
+    "meta_data": {
+     "ami-id": "ami-014e1416b628b0cbf",
+     "ami-launch-index": "0",
+     "ami-manifest-path": "(unknown)",
+     "block-device-mapping": {
+      "ami": "/dev/sda1",
+      "ephemeral0": "sdb",
+      "ephemeral1": "sdc",
+      "root": "/dev/sda1"
+     },
+     "hostname": "ip-10-41-41-70.us-east-2.compute.internal",
+     "instance-action": "none",
+     "instance-id": "i-04fa31cfc55aa7976",
+     "instance-type": "t2.micro",
+     "local-hostname": "ip-10-41-41-70.us-east-2.compute.internal",
+     "local-ipv4": "10.41.41.70",
+     "mac": "06:b6:92:dd:9d:24",
+     "metrics": {
+      "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
+     },
+     "network": {
+      "interfaces": {
+       "macs": {
+	"06:b6:92:dd:9d:24": {
+	 "device-number": "0",
+	 "interface-id": "eni-08c0c9fdb99b6e6f4",
+	 "ipv4-associations": {
+	  "18.224.22.43": "10.41.41.70"
+	 },
+	 "local-hostname": "ip-10-41-41-70.us-east-2.compute.internal",
+	 "local-ipv4s": "10.41.41.70",
+	 "mac": "06:b6:92:dd:9d:24",
+	 "owner-id": "437526006925",
+	 "public-hostname": "ec2-18-224-22-43.us-east-2.compute.amazonaws.com",
+	 "public-ipv4s": "18.224.22.43",
+	 "security-group-ids": "sg-828247e9",
+	 "security-groups": "Cloud-init integration test secgroup",
+	 "subnet-id": "subnet-282f3053",
+	 "subnet-ipv4-cidr-block": "10.41.41.0/24",
+	 "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64",
+	 "vpc-id": "vpc-252ef24d",
+	 "vpc-ipv4-cidr-block": "10.41.0.0/16",
+	 "vpc-ipv4-cidr-blocks": "10.41.0.0/16",
+	 "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56"
+	}
+       }
+      }
+     },
+     "placement": {
+      "availability-zone": "us-east-2b"
+     },
+     "profile": "default-hvm",
+     "public-hostname": "ec2-18-224-22-43.us-east-2.compute.amazonaws.com",
+     "public-ipv4": "18.224.22.43",
+     "public-keys": {
+      "cloud-init-integration": [
+       "ssh-rsa
+  AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB
+  cloud-init-integration"
+      ]
+     },
+     "reservation-id": "r-06ab75e9346f54333",
+     "security-groups": "Cloud-init integration test secgroup",
+     "services": {
+      "domain": "amazonaws.com",
+      "partition": "aws"
+     }
+    }
+   },
+   "v1": {
+    "availability-zone": "us-east-2b",
+    "availability_zone": "us-east-2b",
+    "cloud-name": "aws",
+    "cloud_name": "aws",
+    "instance-id": "i-04fa31cfc55aa7976",
+    "instance_id": "i-04fa31cfc55aa7976",
+    "local-hostname": "ip-10-41-41-70",
+    "local_hostname": "ip-10-41-41-70",
+    "region": "us-east-2"
+   }
+  }
+
+
+Using instance-data
+===================
+
+As of cloud-init v. 18.4, any variables present in
+``/run/cloud-init/instance-data.json`` can be used in:
+
+* User-data scripts
+* Cloud config data
+* Command line interface via **cloud-init query** or
+  **cloud-init devel render**
+
+Many clouds allow users to provide user-data to an instance at
+the time the instance is launched. Cloud-init supports a number of
+:ref:`user_data_formats`.
+
+Both user-data scripts and **#cloud-config** data support jinja template
+rendering.
+When the first line of the provided user-data begins with,
+**## template: jinja** cloud-init will use jinja to render that file.
+Any instance-data-sensitive.json variables are surfaced as dot-delimited
+jinja template variables because cloud-config modules are run as 'root'
+user.
+
+
+Below are some examples of providing these types of user-data:
+
+* Cloud config calling home with the ec2 public hostname and availability-zone
+
+.. code-block:: shell-session
+
+  ## template: jinja
+  #cloud-config
+  runcmd:
+      - echo 'EC2 public hostname allocated to instance: {{
+        ds.meta_data.public_hostname }}' > /tmp/instance_metadata
+      - echo 'EC2 availability zone: {{ v1.availability_zone }}' >>
+        /tmp/instance_metadata
+      - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}",
+        "availability-zone": "{{ v1.availability_zone }}"}'
+        https://example.com
+
+* Custom user-data script performing different operations based on region
+
+.. code-block:: shell-session
+
+   ## template: jinja
+   #!/bin/bash
+   {% if v1.region == 'us-east-2' -%}
+   echo 'Installing custom proxies for {{ v1.region }}'
+   sudo apt-get install my-xtra-fast-stack
+   {%- endif %}
+   ...
+
+.. note::
+  Trying to reference jinja variables that don't exist in
+  instance-data.json will result in warnings in ``/var/log/cloud-init.log``
+  and the following string in your rendered user-data:
+  ``CI_MISSING_JINJA_VAR/<your_varname>``.
+
+Cloud-init also surfaces a commandline tool **cloud-init query** which can
+assist developers or scripts with obtaining instance metadata easily. See
+:ref:`cli_query` for more information.
+
+To cut down on keystrokes on the command line, cloud-init also provides
+top-level key aliases for any standardized ``v#`` keys present. The preceding
+``v1`` is not required of ``v1.var_name``. These aliases will represent the
+value of the highest versioned standard key. For example, ``cloud_name``
+value will be ``v2.cloud_name`` if both ``v1`` and ``v2`` keys are present in
+instance-data.json.
+The **query** command also publishes ``userdata`` and ``vendordata`` keys to
+the root user which will contain the decoded user and vendor data provided to
+this instance. Non-root users referencing userdata or vendordata keys will
+see only redacted values.
+
+.. code-block:: shell-session
+
+ # List all top-level instance-data keys available
+ % cloud-init query --list-keys
+
+ # Find your EC2 ami-id
+ % cloud-init query ds.meta_data.ami_id
+
+ # Format your cloud_name and region using jinja template syntax
+ % cloud-init query --format 'cloud: {{ v1.cloud_name }} myregion: {{
+   v1.region }}'
+
+.. note::
+  To save time designing a user-data template for a specific cloud's
+  instance-data.json, use the 'render' cloud-init command on an
+  instance booted on your favorite cloud. See :ref:`cli_devel` for more
+  information.
+
+.. vi: textwidth=78
diff --git a/integration-requirements.txt b/integration-requirements.txt
index f80cb94..880d988 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -5,16 +5,17 @@
 # the packages/pkg-deps.json file as well.
 #
 
+unittest2
 # ec2 backend
 boto3==1.5.9
 
 # ssh communication
 paramiko==2.4.1
 
+
 # lxd backend
 # 04/03/2018: enables use of lxd 3.0
 git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779
 
-
 # finds latest image information
 git+https://git.launchpad.net/simplestreams
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 75b5061..642745d 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -9,6 +9,7 @@ from cloudinit import util as c_util
 from tests.cloud_tests import (config, LOG, setup_image, util)
 from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
 from tests.cloud_tests import platforms
+from tests.cloud_tests.testcases import base, get_test_class
 
 
 def collect_script(instance, base_dir, script, script_name):
@@ -63,6 +64,7 @@ def collect_test_data(args, snapshot, os_name, test_name):
     res = ({}, 1)
 
     # load test config
+    test_name_in = test_name
     test_name = config.path_to_name(test_name)
     test_config = config.load_test_config(test_name)
     user_data = test_config['cloud_config']
@@ -75,6 +77,16 @@ def collect_test_data(args, snapshot, os_name, test_name):
         LOG.warning('test config %s is not enabled, skipping', test_name)
         return ({}, 0)
 
+    test_class = get_test_class(
+        config.name_to_module(test_name_in),
+        test_data={'platform': snapshot.platform_name, 'os_name': os_name},
+        test_conf=test_config['cloud_config'])
+    try:
+        test_class.maybeSkipTest()
+    except base.SkipTest as s:
+        LOG.warning('skipping test config %s: %s', test_name, s)
+        return ({}, 0)
+
     # if testcase requires a feature flag that the image does not support,
     # skip the testcase with a warning
     req_features = test_config.get('required_features', [])
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
index bd548f5..6bb39f7 100644
--- a/tests/cloud_tests/testcases/__init__.py
+++ b/tests/cloud_tests/testcases/__init__.py
@@ -4,8 +4,7 @@
 
 import importlib
 import inspect
-import unittest
-from unittest.util import strclass
+import unittest2
 
 from cloudinit.util import read_conf
 
@@ -13,7 +12,7 @@ from tests.cloud_tests import config
 from tests.cloud_tests.testcases.base import CloudTestCase as base_test
 
 
-def discover_tests(test_name):
+def discover_test(test_name):
     """Discover tests in test file for 'testname'.
 
     @return_value: list of test classes
@@ -25,35 +24,48 @@ def discover_tests(test_name):
     except NameError:
         raise ValueError('no test verifier found at: {}'.format(testmod_name))
 
-    return [mod for name, mod in inspect.getmembers(testmod)
-            if inspect.isclass(mod) and base_test in inspect.getmro(mod) and
-            getattr(mod, '__test__', True)]
+    found = [mod for name, mod in inspect.getmembers(testmod)
+             if (inspect.isclass(mod)
+                 and base_test in inspect.getmro(mod)
+                 and getattr(mod, '__test__', True))]
+    if len(found) != 1:
+        raise RuntimeError(
+            "Unexpected situation, multiple tests for %s: %s" % (
+                test_name, found))
 
+    return found
 
-def get_suite(test_name, data, conf):
-    """Get test suite with all tests for 'testname'.
 
-    @return_value: a test suite
-    """
-    suite = unittest.TestSuite()
-    for test_class in discover_tests(test_name):
+def get_test_class(test_name, test_data, test_conf):
+    test_class = discover_test(test_name)[0]
+
+    class DynamicTestSubclass(test_class):
 
-        class tmp(test_class):
+        _realclass = test_class
+        data = test_data
+        conf = test_conf
+        release_conf = read_conf(config.RELEASES_CONF)['releases']
 
-            _realclass = test_class
+        def __str__(self):
+            return "%s (%s)" % (self._testMethodName,
+                                unittest2.util.strclass(self._realclass))
 
-            def __str__(self):
-                return "%s (%s)" % (self._testMethodName,
-                                    strclass(self._realclass))
+        @classmethod
+        def setUpClass(cls):
+            cls.maybeSkipTest()
 
-            @classmethod
-            def setUpClass(cls):
-                cls.data = data
-                cls.conf = conf
-                cls.release_conf = read_conf(config.RELEASES_CONF)['releases']
+    return DynamicTestSubclass
 
-        suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(tmp))
 
+def get_suite(test_name, data, conf):
+    """Get test suite with all tests for 'testname'.
+
+    @return_value: a test suite
+    """
+    suite = unittest2.TestSuite()
+    suite.addTest(
+        unittest2.defaultTestLoader.loadTestsFromTestCase(
+            get_test_class(test_name, data, conf)))
     return suite
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 2745827..e18d601 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -5,15 +5,15 @@
 import crypt
 import json
 import re
-import unittest
+import unittest2
 
 
 from cloudinit import util as c_util
 
-SkipTest = unittest.SkipTest
+SkipTest = unittest2.SkipTest
 
 
-class CloudTestCase(unittest.TestCase):
+class CloudTestCase(unittest2.TestCase):
     """Base test class for verifiers."""
 
     # data gets populated in get_suite.setUpClass
@@ -31,6 +31,11 @@ class CloudTestCase(unittest.TestCase):
     def is_distro(self, distro_name):
         return self.os_cfg['os'] == distro_name
 
+    @classmethod
+    def maybeSkipTest(cls):
+        """Present to allow subclasses to override and raise a skipTest."""
+        pass
+
     def assertPackageInstalled(self, name, version=None):
         """Check dpkg-query --show output for matching package name.
 
@@ -167,11 +172,12 @@ class CloudTestCase(unittest.TestCase):
                 'Skipping instance-data.json test.'
                 ' OS: %s not bionic or newer' % self.os_name)
         instance_data = json.loads(out)
-        self.assertEqual(
-            ['ds/user_data'], instance_data['base64_encoded_keys'])
+        self.assertItemsEqual(
+            [],
+            instance_data['base64_encoded_keys'])
         ds = instance_data.get('ds', {})
         v1_data = instance_data.get('v1', {})
-        metadata = ds.get('meta-data', {})
+        metadata = ds.get('meta_data', {})
         macs = metadata.get(
             'network', {}).get('interfaces', {}).get('macs', {})
         if not macs:
@@ -187,10 +193,10 @@ class CloudTestCase(unittest.TestCase):
             metadata.get('placement', {}).get('availability-zone'),
             'Could not determine EC2 Availability zone placement')
         self.assertIsNotNone(
-            v1_data['availability-zone'], 'expected ec2 availability-zone')
-        self.assertEqual('aws', v1_data['cloud-name'])
-        self.assertIn('i-', v1_data['instance-id'])
-        self.assertIn('ip-', v1_data['local-hostname'])
+            v1_data['availability_zone'], 'expected ec2 availability_zone')
+        self.assertEqual('aws', v1_data['cloud_name'])
+        self.assertIn('i-', v1_data['instance_id'])
+        self.assertIn('ip-', v1_data['local_hostname'])
         self.assertIsNotNone(v1_data['region'], 'expected ec2 region')
 
     def test_instance_data_json_lxd(self):
@@ -213,16 +219,14 @@ class CloudTestCase(unittest.TestCase):
                 ' OS: %s not bionic or newer' % self.os_name)
         instance_data = json.loads(out)
         v1_data = instance_data.get('v1', {})
-        self.assertEqual(
-            ['ds/user_data', 'ds/vendor_data'],
-            sorted(instance_data['base64_encoded_keys']))
-        self.assertEqual('nocloud', v1_data['cloud-name'])
+        self.assertItemsEqual([], sorted(instance_data['base64_encoded_keys']))
+        self.assertEqual('nocloud', v1_data['cloud_name'])
         self.assertIsNone(
-            v1_data['availability-zone'],
-            'found unexpected lxd availability-zone %s' %
-            v1_data['availability-zone'])
-        self.assertIn('cloud-test', v1_data['instance-id'])
-        self.assertIn('cloud-test', v1_data['local-hostname'])
+            v1_data['availability_zone'],
+            'found unexpected lxd availability_zone %s' %
+            v1_data['availability_zone'])
+        self.assertIn('cloud-test', v1_data['instance_id'])
+        self.assertIn('cloud-test', v1_data['local_hostname'])
         self.assertIsNone(
             v1_data['region'],
             'found unexpected lxd region %s' % v1_data['region'])
@@ -248,18 +252,17 @@ class CloudTestCase(unittest.TestCase):
                 ' OS: %s not bionic or newer' % self.os_name)
         instance_data = json.loads(out)
         v1_data = instance_data.get('v1', {})
-        self.assertEqual(
-            ['ds/user_data'], instance_data['base64_encoded_keys'])
-        self.assertEqual('nocloud', v1_data['cloud-name'])
+        self.assertItemsEqual([], instance_data['base64_encoded_keys'])
+        self.assertEqual('nocloud', v1_data['cloud_name'])
         self.assertIsNone(
-            v1_data['availability-zone'],
-            'found unexpected kvm availability-zone %s' %
-            v1_data['availability-zone'])
+            v1_data['availability_zone'],
+            'found unexpected kvm availability_zone %s' %
+            v1_data['availability_zone'])
         self.assertIsNotNone(
             re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}',
-                     v1_data['instance-id']),
-            'kvm instance-id is not a UUID: %s' % v1_data['instance-id'])
-        self.assertIn('ubuntu', v1_data['local-hostname'])
+                     v1_data['instance_id']),
+            'kvm instance_id is not a UUID: %s' % v1_data['instance_id'])
+        self.assertIn('ubuntu', v1_data['local_hostname'])
         self.assertIsNone(
             v1_data['region'],
             'found unexpected lxd region %s' % v1_data['region'])
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.py b/tests/cloud_tests/testcases/modules/lxd_bridge.py
index c0262ba..ea545e0 100644
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.py
+++ b/tests/cloud_tests/testcases/modules/lxd_bridge.py
@@ -7,15 +7,25 @@ from tests.cloud_tests.testcases import base
 class TestLxdBridge(base.CloudTestCase):
     """Test LXD module."""
 
+    @classmethod
+    def maybeSkipTest(cls):
+        """Skip on cosmic for two reasons:
+        a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
+        b.) apt install lxd installs via snap which can be slow
+            as that will download core snap and lxd."""
+        os_name = cls.data.get('os_name', 'UNKNOWN')
+        if os_name == "cosmic":
+            raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
+
     def test_lxd(self):
         """Test lxd installed."""
         out = self.get_data_file('lxd')
-        self.assertIn('/usr/bin/lxd', out)
+        self.assertIn('/lxd', out)
 
     def test_lxc(self):
         """Test lxc installed."""
         out = self.get_data_file('lxc')
-        self.assertIn('/usr/bin/lxc', out)
+        self.assertIn('/lxc', out)
 
     def test_bridge(self):
         """Test bridge config."""
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.py b/tests/cloud_tests/testcases/modules/lxd_dir.py
index 1495674..797bafe 100644
--- a/tests/cloud_tests/testcases/modules/lxd_dir.py
+++ b/tests/cloud_tests/testcases/modules/lxd_dir.py
@@ -7,14 +7,24 @@ from tests.cloud_tests.testcases import base
 class TestLxdDir(base.CloudTestCase):
     """Test LXD module."""
 
+    @classmethod
+    def maybeSkipTest(cls):
+        """Skip on cosmic for two reasons:
+        a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
+        b.) apt install lxd installs via snap which can be slow
+            as that will download core snap and lxd."""
+        os_name = cls.data.get('os_name', 'UNKNOWN')
+        if os_name == "cosmic":
+            raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
+
     def test_lxd(self):
         """Test lxd installed."""
         out = self.get_data_file('lxd')
-        self.assertIn('/usr/bin/lxd', out)
+        self.assertIn('/lxd', out)
 
     def test_lxc(self):
         """Test lxc installed."""
         out = self.get_data_file('lxc')
-        self.assertIn('/usr/bin/lxc', out)
+        self.assertIn('/lxc', out)
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
index 7d34177..0f4c3d0 100644
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.py
+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py
@@ -1,7 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 """cloud-init Integration Test Verify Script."""
-import unittest
+import unittest2
 
 from tests.cloud_tests.testcases import base
 
@@ -13,7 +13,7 @@ class TestNtpChrony(base.CloudTestCase):
         """Skip this suite of tests on lxd and artful or older."""
         if self.platform == 'lxd':
             if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
-                raise unittest.SkipTest(
+                raise unittest2.SkipTest(
                     'No support for chrony on containers <= artful.'
                     ' LP: #1589780')
         return super(TestNtpChrony, self).setUp()
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index bfb2744..9911ecf 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -3,7 +3,7 @@
 """Verify test results."""
 
 import os
-import unittest
+import unittest2
 
 from tests.cloud_tests import (config, LOG, util, testcases)
 
@@ -18,7 +18,7 @@ def verify_data(data_dir, platform, os_name, tests):
     @return_value: {<test_name>: {passed: True/False, failures: []}}
     """
     base_dir = os.sep.join((data_dir, platform, os_name))
-    runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
+    runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
     res = {}
     for test_name in tests:
         LOG.debug('verifying test data for %s', test_name)
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 740fb76..6e33935 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -34,6 +34,19 @@ auto eth1
 iface eth1 inet dhcp
 '''
 
+BASE_NET_CFG_FROM_V2 = '''
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 192.168.1.5/24
+    gateway 192.168.1.254
+
+auto eth1
+iface eth1 inet dhcp
+'''
+
 BASE_NET_CFG_IPV6 = '''
 auto lo
 iface lo inet loopback
@@ -262,6 +275,32 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
                 '''), results[rc_conf])
         self.assertEqual(0o644, get_mode(rc_conf, tmpd))
 
+    def test_simple_write_freebsd_from_v2eni(self):
+        fbsd_distro = self._get_distro('freebsd')
+
+        rc_conf = '/etc/rc.conf'
+        read_bufs = {
+            rc_conf: 'initial-rc-conf-not-validated',
+            '/etc/resolv.conf': 'initial-resolv-conf-not-validated',
+        }
+
+        tmpd = self.tmp_dir()
+        populate_dir(tmpd, read_bufs)
+        with self.reRooted(tmpd):
+            with mock.patch("cloudinit.distros.freebsd.util.subp",
+                            return_value=('vtnet0', '')):
+                fbsd_distro.apply_network(BASE_NET_CFG_FROM_V2, False)
+                results = dir2dict(tmpd)
+
+        self.assertIn(rc_conf, results)
+        self.assertCfgEquals(
+            dedent('''\
+                ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0"
+                ifconfig_vtnet1="DHCP"
+                defaultrouter="192.168.1.254"
+                '''), results[rc_conf])
+        self.assertEqual(0o644, get_mode(rc_conf, tmpd))
+
     def test_apply_network_config_fallback_freebsd(self):
         fbsd_distro = self._get_distro('freebsd')
 
@@ -328,16 +367,6 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
             self.assertEqual(expected, results[cfgpath])
             self.assertEqual(0o644, get_mode(cfgpath, tmpd))
 
-    def test_simple_write_ub(self):
-        expected_cfgs = {
-            self.eni_path(): BASE_NET_CFG,
-        }
-
-        # ub_distro.apply_network(BASE_NET_CFG, False)
-        self._apply_and_verify_eni(self.distro.apply_network,
-                                   BASE_NET_CFG,
-                                   expected_cfgs=expected_cfgs.copy())
-
     def test_apply_network_config_eni_ub(self):
         expected_cfgs = {
             self.eni_path(): V1_NET_CFG_OUTPUT,
@@ -428,35 +457,6 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
             self.assertCfgEquals(expected, results[cfgpath])
             self.assertEqual(0o644, get_mode(cfgpath, tmpd))
 
-    def test_simple_write_rh(self):
-        expected_cfgs = {
-            self.ifcfg_path('lo'): dedent("""\
-                DEVICE="lo"
-                ONBOOT=yes
-                """),
-            self.ifcfg_path('eth0'): dedent("""\
-                DEVICE="eth0"
-                BOOTPROTO="static"
-                NETMASK="255.255.255.0"
-                IPADDR="192.168.1.5"
-                ONBOOT=yes
-                GATEWAY="192.168.1.254"
-                BROADCAST="192.168.1.0"
-                """),
-            self.ifcfg_path('eth1'): dedent("""\
-                DEVICE="eth1"
-                BOOTPROTO="dhcp"
-                ONBOOT=yes
-                """),
-            self.control_path(): dedent("""\
-                NETWORKING=yes
-                """),
-        }
-        # rh_distro.apply_network(BASE_NET_CFG, False)
-        self._apply_and_verify(self.distro.apply_network,
-                               BASE_NET_CFG,
-                               expected_cfgs=expected_cfgs.copy())
-
     def test_apply_network_config_rh(self):
         expected_cfgs = {
             self.ifcfg_path('eth0'): dedent("""\
@@ -488,47 +488,6 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
                                V1_NET_CFG,
                                expected_cfgs=expected_cfgs.copy())
 
-    def test_write_ipv6_rhel(self):
-        expected_cfgs = {
-            self.ifcfg_path('lo'): dedent("""\
-                DEVICE="lo"
-                ONBOOT=yes
-                """),
-            self.ifcfg_path('eth0'): dedent("""\
-                DEVICE="eth0"
-                BOOTPROTO="static"
-                NETMASK="255.255.255.0"
-                IPADDR="192.168.1.5"
-                ONBOOT=yes
-                GATEWAY="192.168.1.254"
-                BROADCAST="192.168.1.0"
-                IPV6INIT=yes
-                IPV6ADDR="2607:f0d0:1002:0011::2"
-                IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
-                """),
-            self.ifcfg_path('eth1'): dedent("""\
-                DEVICE="eth1"
-                BOOTPROTO="static"
-                NETMASK="255.255.255.0"
-                IPADDR="192.168.1.6"
-                ONBOOT=no
-                GATEWAY="192.168.1.254"
-                BROADCAST="192.168.1.0"
-                IPV6INIT=yes
-                IPV6ADDR="2607:f0d0:1002:0011::3"
-                IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
-                """),
-            self.control_path(): dedent("""\
-                NETWORKING=yes
-                NETWORKING_IPV6=yes
-                IPV6_AUTOCONF=no
-                """),
-        }
-        # rh_distro.apply_network(BASE_NET_CFG_IPV6, False)
-        self._apply_and_verify(self.distro.apply_network,
-                               BASE_NET_CFG_IPV6,
-                               expected_cfgs=expected_cfgs.copy())
-
     def test_apply_network_config_ipv6_rh(self):
         expected_cfgs = {
             self.ifcfg_path('eth0'): dedent("""\
@@ -588,37 +547,6 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
             self.assertCfgEquals(expected, results[cfgpath])
             self.assertEqual(0o644, get_mode(cfgpath, tmpd))
 
-    def test_simple_write_opensuse(self):
-        """Opensuse network rendering writes appropriate sysconfig files."""
-        expected_cfgs = {
-            self.ifcfg_path('lo'): dedent('''
-                STARTMODE="auto"
-                USERCONTROL="no"
-                FIREWALL="no"
-                '''),
-            self.ifcfg_path('eth0'): dedent('''
-                BOOTPROTO="static"
-                BROADCAST="192.168.1.0"
-                GATEWAY="192.168.1.254"
-                IPADDR="192.168.1.5"
-                NETMASK="255.255.255.0"
-                STARTMODE="auto"
-                USERCONTROL="no"
-                ETHTOOL_OPTIONS=""
-                '''),
-            self.ifcfg_path('eth1'): dedent('''
-                BOOTPROTO="dhcp"
-                STARTMODE="auto"
-                USERCONTROL="no"
-                ETHTOOL_OPTIONS=""
-                ''')
-        }
-
-        # distro.apply_network(BASE_NET_CFG, False)
-        self._apply_and_verify(self.distro.apply_network,
-                               BASE_NET_CFG,
-                               expected_cfgs=expected_cfgs.copy())
-
     def test_apply_network_config_opensuse(self):
         """Opensuse uses apply_network_config and renders sysconfig"""
         expected_cfgs = {
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
index a81c67c..90fe6ee 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -949,7 +949,8 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
         self.assertEqual(
             orig, cc_apt_configure.disable_suites(["proposed"], orig, rel))
 
-    def test_apt_v3_mirror_search_dns(self):
+    @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain')
+    def test_apt_v3_mirror_search_dns(self, m_get_hostname):
         """test_apt_v3_mirror_search_dns - Test searching dns patterns"""
         pmir = "phit"
         smir = "shit"
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index 4dd7e09..2478ebc 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -43,12 +43,12 @@ class TestLxd(t_help.CiTestCase):
         self.assertTrue(mock_util.which.called)
         # no bridge config, so maybe_cleanup should not be called.
         self.assertFalse(m_maybe_clean.called)
-        init_call = mock_util.subp.call_args_list[0][0][0]
-        self.assertEqual(init_call,
-                         ['lxd', 'init', '--auto',
-                          '--network-address=0.0.0.0',
-                          '--storage-backend=zfs',
-                          '--storage-pool=poolname'])
+        self.assertEqual(
+            [mock.call(['lxd', 'waitready', '--timeout=300']),
+             mock.call(
+                 ['lxd', 'init', '--auto', '--network-address=0.0.0.0',
+                  '--storage-backend=zfs', '--storage-pool=poolname'])],
+            mock_util.subp.call_args_list)
 
     @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
     @mock.patch("cloudinit.config.cc_lxd.util")
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index f3165da..5d9c7d9 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -3260,11 +3260,15 @@ class TestGetInterfacesByMac(CiTestCase):
     def _se_interface_has_own_mac(self, name):
         return name in self.data['own_macs']
 
+    def _se_get_ib_interface_hwaddr(self, name, ethernet_format):
+        ib_hwaddr = self.data.get('ib_hwaddr', {})
+        return ib_hwaddr.get(name, {}).get(ethernet_format)
+
     def _mock_setup(self):
         self.data = copy.deepcopy(self._data)
         self.data['devices'] = set(list(self.data['macs'].keys()))
         mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
-                 'interface_has_own_mac', 'is_vlan')
+                 'interface_has_own_mac', 'is_vlan', 'get_ib_interface_hwaddr')
         self.mocks = {}
         for n in mocks:
             m = mock.patch('cloudinit.net.' + n,
@@ -3338,6 +3342,20 @@ class TestGetInterfacesByMac(CiTestCase):
         ret = net.get_interfaces_by_mac()
         self.assertEqual('lo', ret[empty_mac])
 
+    def test_ib(self):
+        ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
+        ib_addr_eth_format = '00:11:22:33:44:56'
+        self._mock_setup()
+        self.data['devices'] = ['enp0s1', 'ib0']
+        self.data['own_macs'].append('ib0')
+        self.data['macs']['ib0'] = ib_addr
+        self.data['ib_hwaddr'] = {'ib0': {True: ib_addr_eth_format,
+                                          False: ib_addr}}
+        result = net.get_interfaces_by_mac()
+        expected = {'aa:aa:aa:aa:aa:01': 'enp0s1',
+                    ib_addr_eth_format: 'ib0', ib_addr: 'ib0'}
+        self.assertEqual(expected, result)
+
 
 class TestInterfacesSorting(CiTestCase):
 
@@ -3352,6 +3370,67 @@ class TestInterfacesSorting(CiTestCase):
             ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3'])
 
 
+class TestGetIBHwaddrsByInterface(CiTestCase):
+
+    _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
+    _ib_addr_eth_format = '00:11:22:33:44:56'
+    _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1',
+                         'bridge1-nic', 'tun0', 'ib0'],
+             'bonds': ['bond1'],
+             'bridges': ['bridge1'],
+             'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', 'ib0'],
+             'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
+                      'enp0s2': 'aa:aa:aa:aa:aa:02',
+                      'bond1': 'aa:aa:aa:aa:aa:01',
+                      'bridge1': 'aa:aa:aa:aa:aa:03',
+                      'bridge1-nic': 'aa:aa:aa:aa:aa:03',
+                      'tun0': None,
+                      'ib0': _ib_addr},
+             'ib_hwaddr': {'ib0': {True: _ib_addr_eth_format,
+                                   False: _ib_addr}}}
+    data = {}
+
+    def _mock_setup(self):
+        self.data = copy.deepcopy(self._data)
+        mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
+                 'interface_has_own_mac', 'get_ib_interface_hwaddr')
+        self.mocks = {}
+        for n in mocks:
+            m = mock.patch('cloudinit.net.' + n,
+                           side_effect=getattr(self, '_se_' + n))
+            self.addCleanup(m.stop)
+            self.mocks[n] = m.start()
+
+    def _se_get_devicelist(self):
+        return self.data['devices']
+
+    def _se_get_interface_mac(self, name):
+        return self.data['macs'][name]
+
+    def _se_is_bridge(self, name):
+        return name in self.data['bridges']
+
+    def _se_interface_has_own_mac(self, name):
+        return name in self.data['own_macs']
+
+    def _se_get_ib_interface_hwaddr(self, name, ethernet_format):
+        ib_hwaddr = self.data.get('ib_hwaddr', {})
+        return ib_hwaddr.get(name, {}).get(ethernet_format)
+
+    def test_ethernet(self):
+        self._mock_setup()
+        self.data['devices'].remove('ib0')
+        result = net.get_ib_hwaddrs_by_interface()
+        expected = {}
+        self.assertEqual(expected, result)
+
+    def test_ib(self):
+        self._mock_setup()
+        result = net.get_ib_hwaddrs_by_interface()
+        expected = {'ib0': self._ib_addr}
+        self.assertEqual(expected, result)
+
+
 def _gzip_data(data):
     with io.BytesIO() as iobuf:
         gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf)
diff --git a/tools/tox-venv b/tools/tox-venv
index 76ed507..a5d2162 100755
--- a/tools/tox-venv
+++ b/tools/tox-venv
@@ -1,42 +1,185 @@
 #!/bin/sh
+# https://gist.github.com/smoser/2d4100a6a5d230ca937f
 
+CR='
+'
 error() { echo "$@" 1>&2; }
 fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+get_env_dirs() {
+    # read 'tox --showconfig'. return list of
+    #  envname:dir
+    local key="" equal="" val="" curenv="" out=""
+    while read key equal val; do
+        case "$key" in
+           "[testenv:"*)
+                curenv=${key#*:};
+                curenv=${curenv%%"]"*};
+                continue;;
+        esac
+        if [ "${key#*=}" != "$key" ]; then
+            # older tox shows key=value or key=   value
+            # newer tox shows: key =    value
+            key=${key%%=*}
+            val=${equal}
+        fi
+        [ "$key" = "envdir" ] || continue
+        out="${out:+${out}${CR}}${curenv}:$val"
+    done
+    echo "$out"
+}
+
+load_config() {
+    local tox_ini="$1" out="" envs=""
+    if [ "$tox_ini" = "${CACHED_ENVS_INI}" ]; then
+        _RET="$CACHED_ENVS"
+        return
+    fi
+    out=$(tox -c "$tox_ini" --showconfig) || return 1
+    envs=$(echo "$out" | get_env_dirs) || return 1
+    CACHED_ENVS="$envs"
+    CACHED_ENVS_INI="$tox_ini"
+    _RET="$envs"
+}
+
+list_environments() {
+    local tox_ini="$1" prefix="  " out="" envs="" oifs="$IFS"
+    load_config "$tox_ini" || return 1
+    envs="${_RET}"
+    IFS="$CR"
+    for d in ${envs}; do
+        env=${d%%:*}
+        dir=${d#*:}
+        [ -f "$dir/bin/activate" ] && s="*" || s=""
+        echo "${prefix}$env$s";
+    done
+    IFS="$oifs"
+}
+
+get_command() {
+    local tox_ini="$1" env="$2" out=""
+    shift 2
+    out=$(
+        sed -e ':x; /\\$/ { N; s/\\\n[ ]*//; tx };' "${tox_ini}" |
+        gawk '
+        $1 ~ /^\[testenv.*\]/ {
+            name=$1;
+            sub("\\[", "", name); sub(".*:", "", name);
+            sub("].*", "", name);
+            curenv=name; };
+        $1 == "basepython" && (name == "testenv" || name == n) { python=$3 }
+        $1 == "commands" && (name == "testenv" || name == n) {
+            sub("commands = ", ""); cmd = $0; };
+        END {
+            sub("{envpython}", python, cmd);
+            sub("{toxinidir}", toxinidir, cmd);
+            if (inargs == "") replacement = "\\1"
+            else replacement = inargs
+            cmd = gensub(/{posargs:?([^}]*)}/, replacement, "global", cmd)
+            print(cmd);
+            }' n="$env" toxinidir="$(dirname $tox_ini)" inargs="$*")
+    if [ -z "$out" ]; then
+        error "Failed to find command for $env in $tox_ini"
+        return 1
+    fi
+    echo "$out"
+}
+
+get_env_dir() {
+    local tox_ini="$1" env="$2" oifs="$IFS" t="" d="" envs=""
+    if [ "${TOX_VENV_SHORTCUT:-1}" != "0" ]; then
+        local stox_d="${tox_ini%/*}/.tox/${env}"
+        if [ -e "${stox_d}/bin/activate" ]; then
+            _RET="${stox_d}"
+            return
+        fi
+    fi
+    load_config "$tox_ini" && envs="$_RET" || return 1
+    IFS="$CR"
+    for t in $envs; do
+        [ "$env" = "${t%%:*}" ] && d="${t#*:}" && break
+    done
+    IFS=${oifs}
+    [ -n "$d" ] || return 1
+    _RET="$d"
+}
+
 Usage() {
-   cat <<EOF
-Usage: ${0##*/} tox-environment [command [args]]
+    local tox_ini="$1"
+    cat <<EOF
+Usage: ${0##*/} [--no-create] tox-environment [command [args]]
    run command with provided arguments in the provided tox environment
-   command defaults to \${SHELL:-/bin/sh}.
+   command defaults to 'cmd' (see below).
+
+   run with '--list' to show available environments
 
-   invoke with '--list' to show available environments
+   if 'command' above is literal 'cmd' or '-', then the 'command' will
+   be read from tox.ini.  This allows you to do:
+      tox-venv py27 - tests/some/sub/dir
+   and have the 'command' read correctly and have that execute:
+      python -m nose tests/some/sub/dir
 EOF
-}
-list_toxes() {
-   local td="$1" pre="$2" d=""
-   ( cd "$tox_d" &&
-     for d in *; do [ -f "$d/bin/activate" ] && echo "${pre}$d"; done)
+
+    if [ -f "$tox_ini" ]; then
+        local oini=${tox_ini}
+        [ "${tox_ini}" -ef "$PWD/tox.ini" ] && oini="./tox.ini"
+        echo
+        echo "environments in $oini"
+        list_environments "$tox_ini"
+    fi
 }
 
-[ $# -eq 0 ] && { Usage 1>&2; exit 1; }
-[ "$1" = "-h" -o "$1" = "--help" ] && { Usage; exit 0; }
+if [ -f tox.ini ]; then
+    tox_ini="$PWD/tox.ini"
+else
+    tox_ini="${0%/*}/../tox.ini"
+fi
 
-env="$1"
-shift
-tox_d="${0%/*}/../.tox"
-activate="$tox_d/$env/bin/activate"
+[ $# -eq 0 ] && { Usage "$tox_ini" 1>&2; exit 1; }
+[ "$1" = "-h" -o "$1" = "--help" ] && { Usage "$tox_ini"; exit 0; }
 
+[ -f "$tox_ini" ] || fail "$tox_ini: did not find tox.ini"
 
-[ -d "$tox_d" ] || fail "$tox_d: not a dir. maybe run 'tox'?"
+if [ "$1" = "-l" -o "$1" = "--list" ]; then
+    list_environments "$tox_ini"
+    exit
+fi
 
-[ "$env" = "-l" -o "$env" = "--list" ] && { list_toxes ; exit ; }
+nocreate="false"
+if [ "$1" = "--no-create" ]; then
+    nocreate="true"
+    shift
+fi
 
-if [ ! -f "$activate" ]; then
-   error "$env: not a valid tox environment?"
-   error "try one of:"
-   list_toxes "$tox_d" "  "
-   fail
+env="$1"
+shift
+[ "$1" = "--" ] && shift
+get_env_dir "$tox_ini" "$env" && activate="$_RET/bin/activate" || activate=""
+
+if [ -z "$activate" -o ! -f "$activate" ]; then
+    if $nocreate; then
+        fail "tox env '$env' did not exist, and no-create specified"
+    elif [ -n "$activate" ]; then
+        error "attempting to create $env:"
+        error "    tox -c $tox_ini --recreate --notest -e $env"
+        tox -c "$tox_ini" --recreate --notest -e "$env" ||
+            fail "failed creation of env $env"
+    else
+        error "$env: not a valid tox environment?"
+        error "found tox_ini=$tox_ini"
+        error "try one of:"
+        list_environments "$tox_ini" 1>&2
+        fail
+    fi
 fi
 . "$activate"
 
-[ "$#" -gt 0 ] || set -- ${SHELL:-/bin/bash}
+[ $# -eq 0 ] && set -- cmd
+if [ "$1" = "cmd" -o "$1" = "-" ]; then
+   shift
+   out=$(get_command "$tox_ini" "$env" "$@") || exit
+   eval set -- "$out"
+fi
+echo "inside tox:$env running: $*" 1>&2
 debian_chroot="tox:$env" exec "$@"
+
+# vi: ts=4 expandtab

Follow ups