cloud-init-dev team mailing list archive
-
cloud-init-dev team
-
Mailing list archive
-
Message #02060
[Merge] ~smoser/cloud-init:bug/1686514-azure-reformat-large into cloud-init:master
Scott Moser has proposed merging ~smoser/cloud-init:bug/1686514-azure-reformat-large into cloud-init:master.
Requested reviews:
cloud init development team (cloud-init-dev)
For more details, see:
https://code.launchpad.net/~smoser/cloud-init/+git/cloud-init/+merge/323420
--
Your team cloud init development team is requested to review the proposed merge of ~smoser/cloud-init:bug/1686514-azure-reformat-large into cloud-init:master.
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 04358b7..9b3a0ae 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -266,56 +266,71 @@ class DataSourceAzureNet(sources.DataSource):
return
+def _partitions_on_device(devpath, maxnum=16):
+ # return a list of tuples (ptnum, path) for each part on devpath
+ for suff in ("-part", "p", ""):
+ found = []
+ for pnum in range(1, maxnum):
+ ppath = devpath + suff + str(pnum)
+ if os.path.exists(ppath):
+ found.append((pnum, os.path.realpath(ppath)))
+ if found:
+ return found
+ return []
+
+
+def _has_ntfs_filesystem(devpath):
+ ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
+ LOG.debug('ntfs_devices found = %s', ntfs_devices)
+ return os.path.realpath(devpath) in ntfs_devices
+
+
def can_dev_be_reformatted(devpath):
# determine if the ephemeral block device path devpath
# is newly formatted after a resize.
+ # A newly formatted disk will:
+ # a.) have a partition table (dos or gpt)
+ # b.) have 1 partition that is ntfs formatted, or
+ # have 2 partitions with the second partition ntfs formatted.
+ # (larger instances with >2TB ephemeral disk have gpt, and will
+ # have a microsoft reserved partition as part 1. LP: #1686514)
+ # c.) the ntfs partition will have no files other than possibly
+ # 'dataloss_warning_readme.txt'
if not os.path.exists(devpath):
return False, 'device %s does not exist' % devpath
- realpath = os.path.realpath(devpath)
- LOG.debug('Resolving realpath of %s -> %s', devpath, realpath)
-
- # it is possible that the block device might exist, but the kernel
- # have not yet read the partition table and sent events. we udevadm settle
- # to hope to resolve that. Better here would probably be to test and see,
- # and then settle if we didn't find anything and try again.
- if util.which("udevadm"):
- util.subp(["udevadm", "settle"])
+ LOG.debug('Resolving realpath of %s -> %s', devpath,
+ os.path.realpath(devpath))
# devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
# where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
- part1path = None
- for suff in ("-part", "p", ""):
- cand = devpath + suff + "1"
- if os.path.exists(cand):
- if os.path.exists(devpath + suff + "2"):
- msg = ('device %s had more than 1 partition: %s, %s' %
- devpath, cand, devpath + suff + "2")
- return False, msg
- part1path = cand
- break
-
- if part1path is None:
+ partitions = _partitions_on_device(devpath)
+ if len(partitions) == 0:
return False, 'device %s was not partitioned' % devpath
+ elif len(partitions) > 2:
+ msg = ('device %s had 3 or more partitions: %s' %
+ (devpath, ' '.join([p[1] for p in partitions])))
+ return False, msg
+ elif len(partitions) == 2:
+ cand_part, cand_path = partitions[1]
+ else:
+ cand_part, cand_path = partitions[0]
- real_part1path = os.path.realpath(part1path)
- ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
- LOG.debug('ntfs_devices found = %s', ntfs_devices)
- if real_part1path not in ntfs_devices:
- msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' %
- (part1path, real_part1path, devpath))
+ if not _has_ntfs_filesystem(cand_path):
+ msg = ('partition %s (%s) on device %s was not ntfs formatted' %
+ (cand_part, cand_path, devpath))
return False, msg
def count_files(mp):
ignored = set(['dataloss_warning_readme.txt'])
return len([f for f in os.listdir(mp) if f.lower() not in ignored])
- bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' %
- (part1path, real_part1path, devpath))
+ bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
+ (cand_part, cand_path, devpath))
try:
- file_count = util.mount_cb(part1path, count_files)
+ file_count = util.mount_cb(cand_path, count_files)
except util.MountFailedError as e:
- return False, bmsg + ' but mount of %s failed: %s' % (part1path, e)
+ return False, bmsg + ' but mount of %s failed: %s' % (cand_path, e)
if file_count != 0:
return False, bmsg + ' but had %d files on it.' % file_count
@@ -335,6 +350,12 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
devpath, maxwait)
return
+ # it is possible that the block device might exist, but the kernel
+ # have not yet read the partition table and sent events. we udevadm settle
+ # to resolve that.
+ if util.which("udevadm"):
+ util.subp(["udevadm", "settle"])
+
result = False
msg = None
if is_new_instance:
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 8d22bb5..900ef3a 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,8 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import helpers
-from cloudinit.util import b64e, decode_binary, load_file
-from cloudinit.sources import DataSourceAzure
+from cloudinit.util import b64e, decode_binary, load_file, write_file
+from cloudinit.sources import DataSourceAzure as dsaz
from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest
@@ -115,8 +115,7 @@ class TestAzureDataSource(TestCase):
populate_dir(os.path.join(self.paths.seed_dir, "azure"),
{'ovf-env.xml': data['ovfcontent']})
- mod = DataSourceAzure
- mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
self.get_metadata_from_fabric = mock.MagicMock(return_value={
'public-keys': [],
@@ -125,19 +124,19 @@ class TestAzureDataSource(TestCase):
self.instance_id = 'test-instance-id'
self.apply_patches([
- (mod, 'list_possible_azure_ds_devs', dsdevs),
- (mod, 'invoke_agent', _invoke_agent),
- (mod, 'wait_for_files', _wait_for_files),
- (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
- (mod, 'perform_hostname_bounce', mock.MagicMock()),
- (mod, 'get_hostname', mock.MagicMock()),
- (mod, 'set_hostname', mock.MagicMock()),
- (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
- (mod.util, 'read_dmi_data', mock.MagicMock(
+ (dsaz, 'list_possible_azure_ds_devs', dsdevs),
+ (dsaz, 'invoke_agent', _invoke_agent),
+ (dsaz, 'wait_for_files', _wait_for_files),
+ (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
+ (dsaz, 'perform_hostname_bounce', mock.MagicMock()),
+ (dsaz, 'get_hostname', mock.MagicMock()),
+ (dsaz, 'set_hostname', mock.MagicMock()),
+ (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
+ (dsaz.util, 'read_dmi_data', mock.MagicMock(
return_value=self.instance_id)),
])
- dsrc = mod.DataSourceAzureNet(
+ dsrc = dsaz.DataSourceAzureNet(
data.get('sys_cfg', {}), distro=None, paths=self.paths)
if agent_command is not None:
dsrc.ds_cfg['agent_command'] = agent_command
@@ -353,7 +352,7 @@ class TestAzureDataSource(TestCase):
cfg = dsrc.get_config_obj()
self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
- DataSourceAzure.RESOURCE_DISK_PATH)
+ dsaz.RESOURCE_DISK_PATH)
assert 'disk_setup' in cfg
assert 'fs_setup' in cfg
self.assertIsInstance(cfg['disk_setup'], dict)
@@ -403,14 +402,13 @@ class TestAzureDataSource(TestCase):
# Make sure that the redacted password on disk is not used by CI
self.assertNotEqual(dsrc.cfg.get('password'),
- DataSourceAzure.DEF_PASSWD_REDACTION)
+ dsaz.DEF_PASSWD_REDACTION)
# Make sure that the password was really encrypted
et = ET.fromstring(on_disk_ovf)
for elem in et.iter():
if 'UserPassword' in elem.tag:
- self.assertEqual(DataSourceAzure.DEF_PASSWD_REDACTION,
- elem.text)
+ self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
def test_ovf_env_arrives_in_waagent_dir(self):
xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
@@ -459,17 +457,17 @@ class TestAzureBounce(TestCase):
def mock_out_azure_moving_parts(self):
self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'invoke_agent'))
+ mock.patch.object(dsaz, 'invoke_agent'))
self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'wait_for_files'))
+ mock.patch.object(dsaz, 'wait_for_files'))
self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs',
+ mock.patch.object(dsaz, 'list_possible_azure_ds_devs',
mock.MagicMock(return_value=[])))
self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric',
+ mock.patch.object(dsaz, 'get_metadata_from_fabric',
mock.MagicMock(return_value={})))
self.patches.enter_context(
- mock.patch.object(DataSourceAzure.util, 'read_dmi_data',
+ mock.patch.object(dsaz.util, 'read_dmi_data',
mock.MagicMock(return_value='test-instance-id')))
def setUp(self):
@@ -478,13 +476,13 @@ class TestAzureBounce(TestCase):
self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
self.paths = helpers.Paths({'cloud_dir': self.tmp})
self.addCleanup(shutil.rmtree, self.tmp)
- DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
self.patches = ExitStack()
self.mock_out_azure_moving_parts()
self.get_hostname = self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'get_hostname'))
+ mock.patch.object(dsaz, 'get_hostname'))
self.set_hostname = self.patches.enter_context(
- mock.patch.object(DataSourceAzure, 'set_hostname'))
+ mock.patch.object(dsaz, 'set_hostname'))
self.subp = self.patches.enter_context(
mock.patch('cloudinit.sources.DataSourceAzure.util.subp'))
@@ -495,7 +493,7 @@ class TestAzureBounce(TestCase):
if ovfcontent is not None:
populate_dir(os.path.join(self.paths.seed_dir, "azure"),
{'ovf-env.xml': ovfcontent})
- dsrc = DataSourceAzure.DataSourceAzureNet(
+ dsrc = dsaz.DataSourceAzureNet(
{}, distro=None, paths=self.paths)
if agent_command is not None:
dsrc.ds_cfg['agent_command'] = agent_command
@@ -608,7 +606,7 @@ class TestAzureBounce(TestCase):
def test_default_bounce_command_used_by_default(self):
cmd = 'default-bounce-command'
- DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd
+ dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd
cfg = {'hostname_bounce': {'policy': 'force'}}
data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
self._get_ds(data, agent_command=['not', '__builtin__']).get_data()
@@ -636,15 +634,105 @@ class TestAzureBounce(TestCase):
class TestReadAzureOvf(TestCase):
def test_invalid_xml_raises_non_azure_ds(self):
invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
- self.assertRaises(DataSourceAzure.BrokenAzureDataSource,
- DataSourceAzure.read_azure_ovf, invalid_xml)
+ self.assertRaises(dsaz.BrokenAzureDataSource,
+ dsaz.read_azure_ovf, invalid_xml)
def test_load_with_pubkeys(self):
mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
content = construct_valid_ovf_env(pubkeys=pubkeys)
- (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)
+ (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
for mypk in mypklist:
self.assertIn(mypk, cfg['_pubkeys'])
+
+class TestCanDevBeReformatted(TestCase):
+ def _domock(self, mockpath, sattr=None):
+ patcher = mock.patch(mockpath)
+ setattr(self, sattr, patcher.start())
+ self.addCleanup(patcher.stop)
+
+ def setUp(self):
+ super(TestCanDevBeReformatted, self).setUp()
+
+ def patchup(self, devs):
+ def realpath(d):
+ return bypath[d].get('realpath', d)
+
+ def partitions_on_device(devpath):
+ parts = bypath.get(devpath, {}).get('partitions', {})
+ ret = []
+ for path, data in parts.items():
+ ret.append((data.get('num'), realpath(path)))
+ print("for %s, returning %s" % (devpath, ret))
+ return ret
+
+ def mount_cb(device, callback):
+ p = self.temp_path()
+ for f in bypath.get(device).get('files', []):
+ write_file(os.path.join(p, f), f)
+ return callback(p)
+
+ def has_ntfs_fs(device):
+ return bypath.get(device, {}).get('fs') == 'ntfs'
+
+ bypath = {}
+ for path, data in devs.items():
+ bypath[path] = data
+ if 'realpath' in data:
+ bypath[data['realpath']] = data
+ for ppath, pdata in data.get('partitions', {}).items():
+ bypath[ppath] = pdata
+ if 'realpath' in data:
+ bypath[pdata['realpath']] = pdata
+
+ p = 'cloudinit.sources.DataSourceAzure'
+ self._domock(p + "._partitions_on_device", 'm_partitions_on_device')
+ self._domock(p + "._has_ntfs_filesystem", 'm_has_ntfs_filesystem')
+ self._domock(p + ".util.mount_cb", 'm_mount_cb')
+ self._domock(p + ".os.path.realpath", 'm_realpath')
+ self._domock(p + ".os.path.exists", 'm_exists')
+
+ self.m_exists.side_effect = lambda p: p in bypath
+ self.m_realpath.side_effect = lambda p: realpath(p)
+ self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
+ self.m_mount_cb.side_effect = mount_cb
+ self.m_partitions_on_device.side_effect = partitions_on_device
+
+ def test_three_partitions_is_false(self):
+ self.patchup({
+ '/dev/sda': {
+ 'partitions': {
+ '/dev/sda1': {'num': 1},
+ '/dev/sda2': {'num': 2},
+ '/dev/sda3': {'num': 3},
+ }}})
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ self.assertEqual(False, value)
+ self.assertIn("3 or more", msg.lower())
+
+ def test_no_partitions_is_false(self):
+ self.patchup({'/dev/sda': {'realpath': '/dev/sda'}})
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ self.assertEqual(False, value)
+ self.assertIn("not partitioned", msg.lower())
+
+ def test_two_partitions_not_ntfs_false(self):
+ pass
+
+ def test_two_partitions_ntfs_populated_false(self):
+ pass
+
+ def test_two_partitions_ntfs_empty_is_true(self):
+ pass
+
+ def test_one_partition_not_ntfs_false(self):
+ pass
+
+ def test_one_partition_ntfs_populated_false(self):
+ pass
+
+ def test_one_partition_ntfs_empty_is_true(self):
+ pass
+
# vi: ts=4 expandtab
Follow ups