sts-sponsors team mailing list archive
-
sts-sponsors team
-
Mailing list archive
-
Message #09006
[Merge] ~mfo/maas:2.9 into maas:2.9
Mauricio Faria de Oliveira has proposed merging ~mfo/maas:2.9 into maas:2.9.
Requested reviews:
Mauricio Faria de Oliveira (mfo)
For more details, see:
https://code.launchpad.net/~mfo/maas/+git/maas/+merge/444216
Please ignore; this is for testing only.
--
Your team MAAS Committers is subscribed to branch maas:2.9.
diff --git a/debian/changelog b/debian/changelog
index fda16a9..64f9a3c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,8 +1,31 @@
-maas (1:2.9.3~beta2-0ubuntu2) UNRELEASED; urgency=medium
+maas (1:2.9.3~rc1-0ubuntu1) focal; urgency=medium
- * Test.
+ * New upstream release, MAAS 2.9.3 RC1.
- -- Mauricio Faria de Oliveira <mfo@xxxxxxxxxxxxx> Thu, 17 Nov 2022 12:29:11 -0300
+ -- Mauricio Faria de Oliveira <mfo@xxxxxxxxxxxxx> Tue, 06 Jun 2023 21:13:12 -0300
+
+maas (1:2.9.3~beta5-0ubuntu1) focal; urgency=medium
+
+ * New upstream release, MAAS 2.9.3 beta5.
+ * Reapply test-related commits since MAAS 2.9.2.
+
+ -- Mauricio Faria de Oliveira <mfo@xxxxxxxxxxxxx> Fri, 13 Jan 2023 19:03:13 -0300
+
+maas (1:2.9.3~beta4-0ubuntu1) focal; urgency=medium
+
+ * New upstream release, MAAS 2.9.3 beta4.
+ * Reapply build-related commits since MAAS 2.9.2.
+ * Apply 'fix: github.com/Supervisor/supervisor master->main'.
+ * Targets `make snap` and `make snap-prime` work.
+
+ -- Mauricio Faria de Oliveira <mfo@xxxxxxxxxxxxx> Fri, 13 Jan 2023 18:37:11 -0300
+
+maas (1:2.9.3~beta3-0ubuntu1) focal; urgency=medium
+
+ * New upstream release, MAAS 2.9.3 beta3.
+ * Revert all unreleased commits since MAAS 2.9.2.
+
+ -- Mauricio Faria de Oliveira <mfo@xxxxxxxxxxxxx> Wed, 11 Jan 2023 17:43:26 -0300
maas (1:2.9.3~beta2-0ubuntu1) focal; urgency=medium
diff --git a/setup.py b/setup.py
index 392dcdd..6489e5e 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ def read(filename):
setup(
name="maas",
- version="2.9.3b2",
+ version="2.9.3rc1",
url="https://maas.io/",
license="AGPLv3",
description="Metal As A Service",
diff --git a/snap/local/requirements.txt b/snap/local/requirements.txt
index 60d3794..3532632 100644
--- a/snap/local/requirements.txt
+++ b/snap/local/requirements.txt
@@ -1,2 +1,2 @@
pyvmomi==6.0.0.2016.6
-git+https://github.com/Supervisor/supervisor@master#egg=supervisor
+git+https://github.com/Supervisor/supervisor@main#egg=supervisor
diff --git a/snap/local/tree/bin/maas-deb-migrate b/snap/local/tree/bin/maas-deb-migrate
index f646f0f..febca85 100755
--- a/snap/local/tree/bin/maas-deb-migrate
+++ b/snap/local/tree/bin/maas-deb-migrate
@@ -65,21 +65,17 @@ cleanup_data() {
}
apply_db_patches() {
- if ! maas_snap_mode | grep -q "region"; then
- return
+ if maas_snap_mode | grep -q "region"; then
+ snap_run "maas-region migrate"
fi
- snap_run "maas-region migrate"
# patch the value of the default cloud images keyring to point to the one
# in the snap
- cat <<EOF | snap_run "maas-region shell"
-from maasserver.models import BootSource
-keyring = "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg"
-(
- BootSource.objects
- .filter(keyring_filename=keyring)
- .update(keyring_filename=f"/snap/maas/current{keyring}")
-)
+ local keyring="/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg"
+ cat <<EOF | pg_do psql maasdb
+UPDATE maasserver_bootsource
+SET keyring_filename = '${MAAS_SNAP}${keyring}'
+WHERE keyring_filename = '${keyring}'
EOF
}
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 42e0803..f6bbc77 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -31,7 +31,7 @@ plugs:
apps:
supervisor:
daemon: simple
- command: bin/ld-preload $SNAP/bin/run-supervisord
+ command: bin/run-supervisord
environment:
PYTHONPATH: $SNAP/usr/lib/python3/dist-packages:$SNAP/usr/lib/python3.8/dist-packages
LD_PRELOAD: $SNAP/usr/lib/stub_initgroups.so
@@ -100,7 +100,6 @@ parts:
- libxtables12
- lshw
- nginx-core
- - nmap
- openssh-client
- python3-attr
- python3-bson
diff --git a/src/maasserver/api/machines.py b/src/maasserver/api/machines.py
index ebae0f2..e84adac 100644
--- a/src/maasserver/api/machines.py
+++ b/src/maasserver/api/machines.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2015-2020 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__all__ = [
@@ -2575,7 +2575,6 @@ class MachinesHandler(NodesHandler, PowersMixin):
- ``mscm``: Moonshot Chassis Manager.
- ``msftocs``: Microsoft OCS Chassis Manager.
- ``powerkvm``: Virtual Machines on Power KVM, managed by Virsh.
- - ``proxmox``: Virtual Machines managed by Proxmox
- ``recs_box``: Christmann RECS|Box servers.
- ``sm15k``: Seamicro 1500 Chassis.
- ``ucsm``: Cisco UCS Manager.
@@ -2605,31 +2604,18 @@ class MachinesHandler(NodesHandler, PowersMixin):
machine added should use.
@param (string) "prefix_filter" [required=false] (``virsh``,
- ``vmware``, ``powerkvm``, ``proxmox`` only.) Filter machines with
- supplied prefix.
+ ``vmware``, ``powerkvm`` only.) Filter machines with supplied prefix.
@param (string) "power_control" [required=false] (``seamicro15k`` only)
The power_control to use, either ipmi (default), restapi, or restapi2.
- The following are optional if you are adding a proxmox chassis.
-
- @param (string) "token_name" [required=false] The name the
- authentication token to be used instead of a password.
-
- @param (string) "token_secret" [required=false] The token secret
- to be used in combination with the power_token_name used in place of
- a password.
-
- @param (boolean) "verify_ssl" [required=false] Whether SSL
- connections should be verified.
-
The following are optional if you are adding a recs_box, vmware or
msftocs chassis.
@param (int) "port" [required=false] (``recs_box``, ``vmware``,
``msftocs`` only) The port to use when accessing the chassis.
- The following are optional if you are adding a vmware chassis:
+ The following are optioanl if you are adding a vmware chassis:
@param (string) "protocol" [required=false] (``vmware`` only) The
protocol to use when accessing the VMware chassis (default: https).
@@ -2669,31 +2655,9 @@ class MachinesHandler(NodesHandler, PowersMixin):
):
username = get_mandatory_param(request.POST, "username")
password = get_mandatory_param(request.POST, "password")
- token_name = None
- token_secret = None
- elif chassis_type == "proxmox":
- username = get_mandatory_param(request.POST, "username")
- password = get_optional_param(request.POST, "password")
- token_name = get_optional_param(request.POST, "token_name")
- token_secret = get_optional_param(request.POST, "token_secret")
- if not any([password, token_name, token_secret]):
- return HttpResponseBadRequest(
- "You must use a password or token with Proxmox."
- )
- elif all([password, token_name, token_secret]):
- return HttpResponseBadRequest(
- "You may only use a password or token with Proxmox, "
- "not both."
- )
- elif password is None and not all([token_name, token_secret]):
- return HttpResponseBadRequest(
- "Proxmox requires both a token_name and token_secret."
- )
else:
username = get_optional_param(request.POST, "username")
password = get_optional_param(request.POST, "password")
- token_name = None
- token_secret = None
if username is not None and chassis_type in ("powerkvm", "virsh"):
return HttpResponseBadRequest(
"username can not be specified when using the %s chassis."
@@ -2709,13 +2673,12 @@ class MachinesHandler(NodesHandler, PowersMixin):
else:
accept_all = False
- # Only available with virsh, vmware, powerkvm, and proxmox
+ # Only available with virsh, vmware, and powerkvm
prefix_filter = get_optional_param(request.POST, "prefix_filter")
if prefix_filter is not None and chassis_type not in (
"powerkvm",
"virsh",
"vmware",
- "proxmox",
):
return HttpResponseBadRequest(
"prefix_filter is unavailable with the %s chassis type"
@@ -2767,10 +2730,6 @@ class MachinesHandler(NodesHandler, PowersMixin):
),
)
- verify_ssl = get_optional_param(
- request.POST, "verify_ssl", default=False, validator=StringBool
- )
-
# If given a domain make sure it exists first
domain_name = get_optional_param(request.POST, "domain")
if domain_name is not None:
@@ -2827,9 +2786,6 @@ class MachinesHandler(NodesHandler, PowersMixin):
power_control,
port,
protocol,
- token_name,
- token_secret,
- verify_ssl,
)
return HttpResponse(
diff --git a/src/maasserver/api/tests/test_machines.py b/src/maasserver/api/tests/test_machines.py
index 7eba13f..0736ffe 100644
--- a/src/maasserver/api/tests/test_machines.py
+++ b/src/maasserver/api/tests/test_machines.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2015-2020 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for the machines API."""
@@ -2843,85 +2843,6 @@ class TestMachinesAPI(APITestCase.ForUser):
)
self.assertEqual(b"No provided password!", response.content)
- def test_POST_add_chassis_proxmox_requires_password_or_token(self):
- self.become_admin()
- rack = factory.make_RackController()
- chassis_mock = self.patch(rack, "add_chassis")
- response = self.client.post(
- reverse("machines_handler"),
- {
- "op": "add_chassis",
- "rack_controller": rack.system_id,
- "chassis_type": "proxmox",
- "hostname": factory.make_url(),
- "username": factory.make_name("username"),
- },
- )
- self.assertEqual(
- http.client.BAD_REQUEST, response.status_code, response.content
- )
- self.assertEqual(
- ("You must use a password or token with Proxmox.").encode("utf-8"),
- response.content,
- )
- self.assertEqual(chassis_mock.call_count, 0)
-
- def test_POST_add_chassis_proxmox_requires_password_xor_token(self):
- self.become_admin()
- rack = factory.make_RackController()
- chassis_mock = self.patch(rack, "add_chassis")
- response = self.client.post(
- reverse("machines_handler"),
- {
- "op": "add_chassis",
- "rack_controller": rack.system_id,
- "chassis_type": "proxmox",
- "hostname": factory.make_url(),
- "username": factory.make_name("username"),
- "password": factory.make_name("password"),
- "token_name": factory.make_name("token_name"),
- "token_secret": factory.make_name("token_secret"),
- },
- )
- self.assertEqual(
- http.client.BAD_REQUEST, response.status_code, response.content
- )
- self.assertEqual(
- (
- "You may only use a password or token with Proxmox, not both."
- ).encode("utf-8"),
- response.content,
- )
- self.assertEqual(chassis_mock.call_count, 0)
-
- def test_POST_add_chassis_proxmox_requires_token_name_and_secret(self):
- self.become_admin()
- rack = factory.make_RackController()
- chassis_mock = self.patch(rack, "add_chassis")
- response = self.client.post(
- reverse("machines_handler"),
- {
- "op": "add_chassis",
- "rack_controller": rack.system_id,
- "chassis_type": "proxmox",
- "hostname": factory.make_url(),
- "username": factory.make_name("username"),
- random.choice(
- ["token_name", "token_secret"]
- ): factory.make_name("token"),
- },
- )
- self.assertEqual(
- http.client.BAD_REQUEST, response.status_code, response.content
- )
- self.assertEqual(
- ("Proxmox requires both a token_name and token_secret.").encode(
- "utf-8"
- ),
- response.content,
- )
- self.assertEqual(chassis_mock.call_count, 0)
-
def test_POST_add_chassis_username_disallowed_on_virsh_and_powerkvm(self):
self.become_admin()
rack = factory.make_RackController()
@@ -2986,9 +2907,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
@@ -3027,9 +2945,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
@@ -3042,7 +2957,7 @@ class TestMachinesAPI(APITestCase.ForUser):
accessible_by_url.return_value = rack
add_chassis = self.patch(rack, "add_chassis")
hostname = factory.make_url()
- for chassis_type in ("powerkvm", "virsh", "vmware", "proxmox"):
+ for chassis_type in ("powerkvm", "virsh", "vmware"):
prefix_filter = factory.make_name("prefix_filter")
password = factory.make_name("password")
params = {
@@ -3052,7 +2967,7 @@ class TestMachinesAPI(APITestCase.ForUser):
"password": password,
"prefix_filter": prefix_filter,
}
- if chassis_type in {"vmware", "proxmox"}:
+ if chassis_type == "vmware":
username = factory.make_name("username")
params["username"] = username
else:
@@ -3075,9 +2990,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
@@ -3174,7 +3086,6 @@ class TestMachinesAPI(APITestCase.ForUser):
"virsh",
"vmware",
"powerkvm",
- "proxmox",
):
params = {
"op": "add_chassis",
@@ -3238,9 +3149,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
port,
None,
- None,
- None,
- False,
),
)
@@ -3358,9 +3266,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
protocol,
- None,
- None,
- False,
),
)
@@ -3431,9 +3336,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
@@ -3473,9 +3375,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
@@ -3536,9 +3435,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
@@ -3579,9 +3475,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
@@ -3651,9 +3544,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
self.assertThat(
@@ -3670,9 +3560,6 @@ class TestMachinesAPI(APITestCase.ForUser):
None,
None,
None,
- None,
- None,
- False,
),
)
diff --git a/src/maasserver/deprecations.py b/src/maasserver/deprecations.py
index e7ee5d7..0f18fa5 100644
--- a/src/maasserver/deprecations.py
+++ b/src/maasserver/deprecations.py
@@ -28,25 +28,12 @@ class Deprecation:
# all known deprecation notices
-DEPRECATIONS = {
- "RSD_REMOVAL": Deprecation(
- id="MD2",
- since="2.9.3",
- description=(
- "Support for RSD pods will be dropped in the next major release."
- ),
- )
-}
+DEPRECATIONS = {}
def get_deprecations():
"""Return a list of currently active deprecation notices."""
- deprecations = []
- from maasserver.models import Pod
-
- if Pod.objects.filter(power_type="rsd").exists():
- deprecations.append(DEPRECATIONS["RSD_REMOVAL"])
- return deprecations
+ return []
def log_deprecations(logger=None):
diff --git a/src/maasserver/eventloop.py b/src/maasserver/eventloop.py
index f71814a..16019eb 100644
--- a/src/maasserver/eventloop.py
+++ b/src/maasserver/eventloop.py
@@ -41,6 +41,7 @@ from twisted.application.service import MultiService, Service
from twisted.internet import reactor
from twisted.internet.defer import DeferredList, inlineCallbacks, maybeDeferred
+from maasserver.deprecations import log_deprecations
from maasserver.utils.orm import disable_all_database_connections
from maasserver.utils.threads import deferToDatabase
from provisioningserver.prometheus.metrics import set_global_labels
@@ -65,6 +66,7 @@ def make_DatabaseTaskService():
def make_RegionControllerService(postgresListener):
from maasserver.region_controller import RegionControllerService
+ log_deprecations()
return RegionControllerService(postgresListener)
diff --git a/src/maasserver/models/interface.py b/src/maasserver/models/interface.py
index b5d9625..b7738dc 100644
--- a/src/maasserver/models/interface.py
+++ b/src/maasserver/models/interface.py
@@ -111,7 +111,7 @@ class InterfaceQueriesMixin(MAASQueriesMixin):
specifiers,
specifier_types=specifier_types,
separator=separator,
- **kwargs,
+ **kwargs
)
def _add_interface_id_query(self, current_q, op, item):
@@ -1522,10 +1522,20 @@ class Interface(CleanSave, TimestampedModel):
% (self.get_log_string(), vid, vlan.fabric.get_name())
)
- def update_neighbour(self, ip, mac, time, vid=None):
- """Updates the neighbour table for this interface."""
+ def update_neighbour(self, neighbour_json: dict):
+ """Updates the neighbour table for this interface.
+
+ Input is expected to be the neighbour JSON from the controller.
+ """
+ # Circular imports
from maasserver.models.neighbour import Neighbour
+ if self.neighbour_discovery_state is False:
+ return None
+ ip = neighbour_json["ip"]
+ mac = neighbour_json["mac"]
+ time = neighbour_json["time"]
+ vid = neighbour_json.get("vid", None)
deleted = Neighbour.objects.delete_and_log_obsolete_neighbours(
ip, mac, interface=self, vid=vid
)
@@ -1542,10 +1552,13 @@ class Interface(CleanSave, TimestampedModel):
# generated a log statement about this neighbour.
if not deleted:
maaslog.info(
- f"{self.get_log_string()}: "
- "New MAC, IP binding "
- f"observed{Neighbour.objects.get_vid_log_snippet(vid)}: "
- f"{mac}, {ip}"
+ "%s: New MAC, IP binding observed%s: %s, %s"
+ % (
+ self.get_log_string(),
+ Neighbour.objects.get_vid_log_snippet(vid),
+ mac,
+ ip,
+ )
)
else:
neighbour.time = time
diff --git a/src/maasserver/models/node.py b/src/maasserver/models/node.py
index d175d47..5d506b4 100644
--- a/src/maasserver/models/node.py
+++ b/src/maasserver/models/node.py
@@ -2253,14 +2253,11 @@ class Node(CleanSave, TimestampedModel):
# MAAS can log that they were skipped. This avoids user confusion when
# BMC detection is run previously on the node but they don't want BMC
# detection to run again.
- if skip_bmc_config or self.split_arch()[0] == "s390x":
- if self.split_arch()[0] == "s390x":
- result = "INFO: BMC detection not supported on S390X".encode()
- else:
- result = (
- "INFO: User %s (%s) has choosen to skip BMC configuration "
- "during commissioning\n" % (user.get_username(), user.id)
- ).encode()
+ if skip_bmc_config:
+ result = (
+ "INFO: User %s (%s) has choosen to skip BMC configuration "
+ "during commissioning\n" % (user.get_username(), user.id)
+ ).encode()
for script_result in commis_script_set.scriptresult_set.filter(
script__tags__contains=["bmc-config"]
):
@@ -2950,7 +2947,7 @@ class Node(CleanSave, TimestampedModel):
client_idents = pod.get_client_identifiers()
@transactional
- def _save(machine_id, pod_id, result):
+ def _save(machine_id, pod_id, hints):
from maasserver.models.bmc import Pod
machine = Machine.objects.filter(id=machine_id).first()
@@ -2966,16 +2963,9 @@ class Node(CleanSave, TimestampedModel):
machine_id=machine_id
).delete()
super(Node, machine).delete()
-
- if isinstance(result, Failure):
- maaslog.warning(
- f"{self.hostname}: Failure decomposing machine: {result.value}"
- )
- return
-
pod = Pod.objects.filter(id=pod_id).first()
if pod is not None:
- pod.sync_hints(result)
+ pod.sync_hints(hints)
maaslog.info("%s: Decomposing machine", self.hostname)
@@ -2988,10 +2978,8 @@ class Node(CleanSave, TimestampedModel):
pod_id=pod.id,
name=pod.name,
)
- d.addBoth(
- lambda result: (
- deferToDatabase(_save, self.id, pod.id, result)
- )
+ d.addCallback(
+ lambda hints: (deferToDatabase(_save, self.id, pod.id, hints))
)
d.addCallback(lambda _: request_commissioning_results(pod))
else:
@@ -4349,11 +4337,9 @@ class Node(CleanSave, TimestampedModel):
bridge_fd=bridge_fd,
)
- def claim_auto_ips(self, exclude_addresses=None, temp_expires_after=None):
+ def claim_auto_ips(self, temp_expires_after=None):
"""Assign IP addresses to all interface links set to AUTO."""
- exclude_addresses = (
- exclude_addresses.copy() if exclude_addresses else set()
- )
+ exclude_addresses = set()
allocated_ips = set()
# Query for the interfaces again here; if we use the cached
# interface_set, we could skip a newly-created bridge if it was created
@@ -4494,9 +4480,11 @@ class Node(CleanSave, TimestampedModel):
rack_interface = rack_interface.order_by("id")
rack_interface = rack_interface.first()
rack_interface.update_neighbour(
- ip_obj.ip,
- ip_result.get("mac_address"),
- time.time(),
+ {
+ "ip": ip_obj.ip,
+ "mac": ip_result.get("mac_address"),
+ "time": time.time(),
+ }
)
ip_obj.ip = None
ip_obj.temp_expires_on = None
@@ -4514,7 +4502,6 @@ class Node(CleanSave, TimestampedModel):
yield deferToDatabase(clean_expired)
allocated_ips = yield deferToDatabase(
transactional(self.claim_auto_ips),
- exclude_addresses=attempted_ips,
temp_expires_after=timedelta(minutes=5),
)
if not allocated_ips:
@@ -6736,14 +6723,8 @@ class Controller(Node):
for neighbour in neighbours:
interface = interfaces.get(neighbour["interface"], None)
if interface is not None:
+ interface.update_neighbour(neighbour)
vid = neighbour.get("vid", None)
- if interface.neighbour_discovery_state:
- interface.update_neighbour(
- neighbour["ip"],
- neighbour["mac"],
- neighbour["time"],
- vid=vid,
- )
if vid is not None:
interface.report_vid(vid)
@@ -6792,13 +6773,12 @@ class Controller(Node):
VLAN. Otherwise, creates the interfaces but does not create any
links or VLANs.
"""
+ # Avoid circular imports
from metadataserver.builtin_scripts.hooks import (
parse_interfaces_details,
update_interface_details,
)
- numa_ids_map = dict(self.numanode_set.values_list("index", "id"))
-
# Get all of the current interfaces on this node.
current_interfaces = {
interface.id: interface
@@ -6835,9 +6815,7 @@ class Controller(Node):
if interface is not None:
interface.update_discovery_state(discovery_mode, settings)
if interface.type == INTERFACE_TYPE.PHYSICAL:
- update_interface_details(
- interface, interfaces_details, numa_ids_map
- )
+ update_interface_details(interface, interfaces_details)
if interface.id in current_interfaces:
del current_interfaces[interface.id]
@@ -6994,9 +6972,6 @@ class RackController(Controller):
power_control=None,
port=None,
protocol=None,
- token_name=None,
- token_secret=None,
- verify_ssl=False,
):
self._register_request_event(
self.owner,
@@ -7017,9 +6992,6 @@ class RackController(Controller):
power_control=power_control,
port=port,
protocol=protocol,
- token_name=token_name,
- token_secret=token_secret,
- verify_ssl=verify_ssl,
)
call.wait(30)
diff --git a/src/maasserver/models/tests/test_interface.py b/src/maasserver/models/tests/test_interface.py
index ee16f56..b86dbfb 100644
--- a/src/maasserver/models/tests/test_interface.py
+++ b/src/maasserver/models/tests/test_interface.py
@@ -1363,7 +1363,7 @@ class InterfaceTest(MAASServerTestCase):
self.assertEquals(0, interface.link_speed)
-class TestInterfaceUpdateNeighbour(MAASServerTestCase):
+class InterfaceUpdateNeighbourTest(MAASServerTestCase):
"""Tests for `Interface.update_neighbour`."""
def make_neighbour_json(self, ip=None, mac=None, time=None, **kwargs):
@@ -1384,15 +1384,22 @@ class TestInterfaceUpdateNeighbour(MAASServerTestCase):
vid = None
return {"ip": ip, "mac": mac, "time": time, "vid": vid}
- def test_adds_new_neighbour(self):
+ def test_ignores_updates_if_neighbour_discovery_state_is_false(self):
iface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL)
- iface.update_neighbour(**self.make_neighbour_json())
+ iface.update_neighbour(self.make_neighbour_json())
+ self.assertThat(Neighbour.objects.count(), Equals(0))
+
+ def test_adds_new_neighbour_if_neighbour_discovery_state_is_true(self):
+ iface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL)
+ iface.neighbour_discovery_state = True
+ iface.update_neighbour(self.make_neighbour_json())
self.assertThat(Neighbour.objects.count(), Equals(1))
def test_updates_existing_neighbour(self):
iface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL)
+ iface.neighbour_discovery_state = True
json = self.make_neighbour_json()
- iface.update_neighbour(**json)
+ iface.update_neighbour(json)
neighbour = get_one(Neighbour.objects.all())
# Pretend this was updated one day ago.
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
@@ -1403,7 +1410,7 @@ class TestInterfaceUpdateNeighbour(MAASServerTestCase):
Equals(int(yesterday.timestamp())),
)
json["time"] += 1
- iface.update_neighbour(**json)
+ iface.update_neighbour(json)
neighbour = reload_object(neighbour)
self.assertThat(Neighbour.objects.count(), Equals(1))
self.assertThat(neighbour.time, Equals(json["time"]))
@@ -1415,12 +1422,13 @@ class TestInterfaceUpdateNeighbour(MAASServerTestCase):
def test_replaces_obsolete_neighbour(self):
iface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL)
+ iface.neighbour_discovery_state = True
json = self.make_neighbour_json()
- iface.update_neighbour(**json)
+ iface.update_neighbour(json)
# Have a different MAC address claim ownership of the IP.
json["time"] += 1
json["mac"] = factory.make_mac_address()
- iface.update_neighbour(**json)
+ iface.update_neighbour(json)
self.assertThat(Neighbour.objects.count(), Equals(1))
self.assertThat(
list(Neighbour.objects.all())[0].mac_address, Equals(json["mac"])
@@ -1431,8 +1439,10 @@ class TestInterfaceUpdateNeighbour(MAASServerTestCase):
def test_logs_new_binding(self):
iface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL)
+ iface.neighbour_discovery_state = True
+ json = self.make_neighbour_json()
with FakeLogger("maas.interface") as maaslog:
- iface.update_neighbour(**self.make_neighbour_json())
+ iface.update_neighbour(json)
self.assertDocTestMatches(
"...: New MAC, IP binding observed...", maaslog.output
)
@@ -1441,18 +1451,18 @@ class TestInterfaceUpdateNeighbour(MAASServerTestCase):
iface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL)
iface.neighbour_discovery_state = True
json = self.make_neighbour_json()
- iface.update_neighbour(**json)
+ iface.update_neighbour(json)
# Have a different MAC address claim ownership of the IP.
json["time"] += 1
json["mac"] = factory.make_mac_address()
with FakeLogger("maas.neighbour") as maaslog:
- iface.update_neighbour(**json)
+ iface.update_neighbour(json)
self.assertDocTestMatches(
"...: IP address...moved from...to...", maaslog.output
)
-class TestInterfaceUpdateMDNSEntry(MAASServerTestCase):
+class InterfaceUpdateMDNSEntryTest(MAASServerTestCase):
"""Tests for `Interface.update_mdns_entry`."""
def make_mdns_entry_json(self, ip=None, hostname=None):
@@ -1467,7 +1477,7 @@ class TestInterfaceUpdateMDNSEntry(MAASServerTestCase):
def test_ignores_updates_if_mdns_discovery_state_is_false(self):
iface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL)
- iface.update_mdns_entry(self.make_mdns_entry_json())
+ iface.update_neighbour(self.make_mdns_entry_json())
self.assertThat(MDNS.objects.count(), Equals(0))
def test_adds_new_entry_if_mdns_discovery_state_is_true(self):
diff --git a/src/maasserver/models/tests/test_node.py b/src/maasserver/models/tests/test_node.py
index ab07168..a5a5aa7 100644
--- a/src/maasserver/models/tests/test_node.py
+++ b/src/maasserver/models/tests/test_node.py
@@ -74,11 +74,11 @@ from maasserver.enum import (
from maasserver.exceptions import (
IPAddressCheckFailed,
NodeStateViolation,
+ PodProblem,
PowerProblem,
StaticIPAddressExhaustion,
)
from maasserver.models import (
- BMCRoutableRackControllerRelationship,
BondInterface,
BootResource,
BridgeInterface,
@@ -86,13 +86,11 @@ from maasserver.models import (
Controller,
Device,
Domain,
- Event,
EventType,
Fabric,
Interface,
LicenseKey,
Machine,
- Neighbour,
Node,
)
from maasserver.models import (
@@ -102,7 +100,6 @@ from maasserver.models import (
RAID,
RegionController,
RegionRackRPCConnection,
- ResourcePool,
Service,
StaticIPAddress,
Subnet,
@@ -111,10 +108,12 @@ from maasserver.models import (
VLANInterface,
VolumeGroup,
)
-from maasserver.models import Bcache, BMC
+from maasserver.models import Bcache
from maasserver.models import bmc as bmc_module
from maasserver.models import node as node_module
+from maasserver.models.bmc import BMC, BMCRoutableRackControllerRelationship
from maasserver.models.config import NetworkDiscoveryConfig
+from maasserver.models.event import Event
import maasserver.models.interface as interface_module
from maasserver.models.node import (
DEFAULT_BIOS_BOOT_METHOD,
@@ -124,6 +123,7 @@ from maasserver.models.node import (
PowerInfo,
)
from maasserver.models.partitiontable import PARTITION_TABLE_EXTRA_SPACE
+from maasserver.models.resourcepool import ResourcePool
from maasserver.models.signals import power as node_query
from maasserver.models.timestampedmodel import now
from maasserver.node_status import (
@@ -3471,25 +3471,6 @@ class TestNode(MAASServerTestCase):
self.assertEqual(0, script_result.exit_status)
self.assertEqual(SCRIPT_STATUS.SKIPPED, script_result.status)
- def test_start_commissioning_skip_bmc_config_s390x(self):
- script = factory.make_Script(
- script_type=SCRIPT_TYPE.COMMISSIONING, tags=["bmc-config"]
- )
- node = factory.make_Node(architecture="s390x/generic")
- admin = factory.make_admin()
- self.patch(Node, "_start").return_value = None
-
- node.start_commissioning(admin)
- post_commit_hooks.reset() # Ignore these for now.
-
- script_result = (
- node.current_commissioning_script_set.scriptresult_set.get(
- script=script
- )
- )
- self.assertEqual(0, script_result.exit_status)
- self.assertEqual(SCRIPT_STATUS.SKIPPED, script_result.status)
-
def test_start_commissioning_reverts_to_sane_state_on_error(self):
# When start_commissioning encounters an error when trying to
# start the node, it will revert the node to its previous
@@ -6087,20 +6068,16 @@ class TestDecomposeMachineTransactional(
interface = transactional(reload_object)(interface)
self.assertIsNone(interface)
- def test_delete_doesnt_fail_removal(self):
- mock_log_warning = self.patch(node_module.maaslog, "warning")
+ def test_errors_raised_up(self):
pod, machine, hints, client = self.create_pod_machine_and_hints(
creation_type=NODE_CREATION_TYPE.MANUAL
)
- client.return_value = defer.fail(PodActionFail("bang!"))
- with post_commit_hooks:
- machine.delete()
- mock_log_warning.assert_called_with(
- f"{machine.hostname}: Failure decomposing machine: "
- "Unable to decompose machine because: bang!"
- )
- # the machine is still deleted
- self.assertIsNone(transactional(reload_object)(machine))
+ client.return_value = defer.fail(PodActionFail())
+ with ExpectedException(PodProblem):
+ with post_commit_hooks:
+ machine.delete()
+ machine = transactional(reload_object)(machine)
+ self.assertIsNotNone(machine)
def test_release_deletes_dynamic_machine(self):
owner = transactional(factory.make_User)()
@@ -8717,74 +8694,6 @@ class TestNode_Start(MAASTransactionServerTestCase):
self.assertThat(auto_ip.ip, Equals(third_ip.ip))
self.assertThat(auto_ip.temp_expires_on, Is(None))
- def test_claims_auto_ip_addresses_skips_used_ip_discovery_disabled(self):
- user = factory.make_User()
- node = self.make_acquired_node_with_interface(
- user, power_type="manual"
- )
- node_interface = node.get_boot_interface()
- [auto_ip] = node_interface.ip_addresses.filter(
- alloc_type=IPADDRESS_TYPE.AUTO
- )
-
- # Create a rack controller that has an interface on the same subnet
- # as the node. Don't enable neighbour discovery
- rack = factory.make_RackController()
- rack.interface_set.all().delete()
- rackif = factory.make_Interface(vlan=node_interface.vlan, node=rack)
- rackif_ip = factory.pick_ip_in_Subnet(auto_ip.subnet)
- rackif.link_subnet(
- INTERFACE_LINK_TYPE.STATIC, auto_ip.subnet, rackif_ip
- )
-
- # Mock the rack controller connected to the region controller.
- client = Mock()
- client.ident = rack.system_id
- self.patch(node_module, "getAllClients").return_value = [client]
-
- # Must be executed in a transaction as `allocate_new` uses savepoints.
- with transaction.atomic():
- # Get two IPs and remove them so they're unknown
- ip = StaticIPAddress.objects.allocate_new(
- subnet=auto_ip.subnet, alloc_type=IPADDRESS_TYPE.AUTO
- )
- ip1 = ip.ip
- ip.delete()
- ip = StaticIPAddress.objects.allocate_new(
- subnet=auto_ip.subnet,
- alloc_type=IPADDRESS_TYPE.AUTO,
- exclude_addresses=[ip1],
- )
- ip2 = ip.ip
- ip.delete()
-
- client.side_effect = [
- defer.succeed(
- {
- "ip_addresses": [
- {
- "ip_address": ip1,
- "used": True,
- "mac_address": factory.make_mac_address(),
- }
- ]
- }
- ),
- defer.succeed(
- {"ip_addresses": [{"ip_address": ip2, "used": False}]}
- ),
- ]
-
- with post_commit_hooks:
- node.start(user)
-
- auto_ip = reload_object(auto_ip)
- self.assertThat(auto_ip.ip, Equals(ip2))
- self.assertThat(auto_ip.temp_expires_on, Is(None))
- self.assertCountEqual(
- [ip1], Neighbour.objects.values_list("ip", flat=True)
- )
-
def test_claims_auto_ip_addresses_retries_on_failure_from_rack(self):
user = factory.make_User()
node = self.make_acquired_node_with_interface(
@@ -10005,51 +9914,21 @@ class TestControllerUpdateDiscoveryState(MAASServerTestCase):
class TestReportNeighbours(MAASServerTestCase):
"""Tests for `Controller.report_neighbours()."""
- def test_no_update_neighbours_calls_if_discovery_disabled(self):
- rack = factory.make_RackController()
- factory.make_Interface(name="eth0", node=rack)
- update_neighbour = self.patch(
- interface_module.Interface, "update_neighbour"
- )
- neighbours = [
- {
- "interface": "eth0",
- "mac": factory.make_mac_address(),
- "ip": factory.make_ipv4_address(),
- "time": datetime.now(),
- },
- ]
- rack.report_neighbours(neighbours)
- update_neighbour.assert_not_called()
-
def test_calls_update_neighbour_for_each_neighbour(self):
rack = factory.make_RackController()
- if1 = factory.make_Interface(name="eth0", node=rack)
- if1.neighbour_discovery_state = True
- if1.save()
- if2 = factory.make_Interface(name="eth1", node=rack)
- if2.neighbour_discovery_state = True
- if2.save()
+ factory.make_Interface(name="eth0", node=rack)
+ factory.make_Interface(name="eth1", node=rack)
update_neighbour = self.patch(
interface_module.Interface, "update_neighbour"
)
neighbours = [
- {
- "interface": "eth0",
- "mac": factory.make_mac_address(),
- "ip": factory.make_ipv4_address(),
- "time": datetime.now(),
- },
- {
- "interface": "eth1",
- "mac": factory.make_mac_address(),
- "ip": factory.make_ipv4_address(),
- "time": datetime.now(),
- },
+ {"interface": "eth0", "mac": factory.make_mac_address()},
+ {"interface": "eth1", "mac": factory.make_mac_address()},
]
rack.report_neighbours(neighbours)
- update_neighbour.assert_has_calls(
- [call(n["ip"], n["mac"], n["time"], vid=None) for n in neighbours]
+ self.assertThat(
+ update_neighbour,
+ MockCallsMatch(*[call(neighbour) for neighbour in neighbours]),
)
def test_calls_report_vid_for_each_vid(self):
@@ -10060,23 +9939,11 @@ class TestReportNeighbours(MAASServerTestCase):
self.patch(interface_module.Interface, "update_neighbour")
report_vid = self.patch(interface_module.Interface, "report_vid")
neighbours = [
- {
- "interface": "eth0",
- "ip": factory.make_ipv4_address(),
- "time": datetime.now(),
- "mac": factory.make_mac_address(),
- "vid": 3,
- },
- {
- "interface": "eth1",
- "ip": factory.make_ipv4_address(),
- "time": datetime.now(),
- "mac": factory.make_mac_address(),
- "vid": 7,
- },
+ {"interface": "eth0", "mac": factory.make_mac_address(), "vid": 3},
+ {"interface": "eth1", "mac": factory.make_mac_address(), "vid": 7},
]
rack.report_neighbours(neighbours)
- report_vid.assert_has_calls([call(3), call(7)])
+ self.assertThat(report_vid, MockCallsMatch(call(3), call(7)))
class TestReportMDNSEntries(MAASServerTestCase):
@@ -13680,9 +13547,6 @@ class TestRackController(MAASTransactionServerTestCase):
power_control = factory.make_name("power_control")
port = random.randint(0, 65535)
given_protocol = factory.make_name("protocol")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- verify_ssl = factory.pick_bool()
rackcontroller.add_chassis(
user,
@@ -13696,9 +13560,6 @@ class TestRackController(MAASTransactionServerTestCase):
power_control,
port,
given_protocol,
- token_name,
- token_secret,
- verify_ssl,
)
self.expectThat(
@@ -13716,9 +13577,6 @@ class TestRackController(MAASTransactionServerTestCase):
power_control=power_control,
port=port,
protocol=given_protocol,
- token_name=token_name,
- token_secret=token_secret,
- verify_ssl=verify_ssl,
),
)
@@ -13742,9 +13600,6 @@ class TestRackController(MAASTransactionServerTestCase):
power_control = factory.make_name("power_control")
port = random.randint(0, 65535)
given_protocol = factory.make_name("protocol")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- verify_ssl = factory.pick_bool()
register_event = self.patch(rackcontroller, "_register_request_event")
rackcontroller.add_chassis(
@@ -13759,9 +13614,6 @@ class TestRackController(MAASTransactionServerTestCase):
power_control,
port,
given_protocol,
- token_name,
- token_secret,
- verify_ssl,
)
post_commit_hooks.reset() # Ignore these for now.
self.assertThat(
diff --git a/src/maasserver/start_up.py b/src/maasserver/start_up.py
index 12dd809..cd191fb 100644
--- a/src/maasserver/start_up.py
+++ b/src/maasserver/start_up.py
@@ -12,10 +12,7 @@ from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from maasserver import locks, security
-from maasserver.deprecations import (
- log_deprecations,
- sync_deprecation_notifications,
-)
+from maasserver.deprecations import sync_deprecation_notifications
from maasserver.fields import register_mac_type
from maasserver.models.config import Config
from maasserver.models.domain import dns_kms_setting_changed
@@ -148,8 +145,7 @@ def inner_start_up(master=False):
ident="commissioning_release_deprecated",
)
- # Log deprecations and update related notifications if needed
- log_deprecations(logger=log)
+ # Update deprecation notifications if needed
sync_deprecation_notifications()
# Refresh soon after this transaction is in.
diff --git a/src/maasserver/tests/test_deprecations.py b/src/maasserver/tests/test_deprecations.py
index 226859f..a1f0b63 100644
--- a/src/maasserver/tests/test_deprecations.py
+++ b/src/maasserver/tests/test_deprecations.py
@@ -6,23 +6,17 @@ from maasserver.deprecations import (
sync_deprecation_notifications,
)
from maasserver.models import Notification
-from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
+from maastesting.testcase import MAASTestCase
from provisioningserver.logger import LegacyLogger
-class TestGetDeprecations(MAASServerTestCase):
+class TestGetDeprecations(MAASTestCase):
def test_empty(self):
self.assertEqual(get_deprecations(), [])
- def test_md2(self):
- factory.make_Pod(pod_type="rsd")
- [md2] = get_deprecations()
- self.assertEqual(md2.id, "MD2")
- self.assertEqual(md2.since, "2.9.3")
-
-class TestLogDeprecations(MAASServerTestCase):
+class TestLogDeprecations(MAASTestCase):
def test_log_deprecations(self):
self.patch(deprecations, "get_deprecations").return_value = [
Deprecation(
diff --git a/src/maasserver/tests/test_start_up.py b/src/maasserver/tests/test_start_up.py
index 80c0003..0ef1397 100644
--- a/src/maasserver/tests/test_start_up.py
+++ b/src/maasserver/tests/test_start_up.py
@@ -211,14 +211,6 @@ class TestInnerStartUp(MAASServerTestCase):
0,
)
- def test_logs_deprecation_notifications(self):
- # create a deprecated RSD pod
- factory.make_Pod(pod_type="rsd")
- mock_log = self.patch(start_up, "log")
- with post_commit_hooks:
- start_up.inner_start_up(master=True)
- mock_log.msg.assert_called_once()
-
class TestFunctions(MAASServerTestCase):
"""Tests for other functions in the `start_up` module."""
diff --git a/src/maasserver/websockets/handlers/pod.py b/src/maasserver/websockets/handlers/pod.py
index 686f9a9..bfe3676 100644
--- a/src/maasserver/websockets/handlers/pod.py
+++ b/src/maasserver/websockets/handlers/pod.py
@@ -97,37 +97,33 @@ class PodHandler(TimestampedModelHandler):
"""Add extra fields to `data`."""
if self.user.is_superuser:
data.update(obj.power_parameters)
- data.update(
- {
- "type": obj.power_type,
- "total": self.dehydrate_total(obj),
- "used": self.dehydrate_used(obj),
- "available": self.dehydrate_available(obj),
- "composed_machines_count": obj.node_set.filter(
- node_type=NODE_TYPE.MACHINE
- ).count(),
- "owners_count": (
- obj.node_set.exclude(owner=None)
- .values_list("owner")
- .distinct()
- .count()
- ),
- "hints": self.dehydrate_hints(obj.hints),
- "storage_pools": [
- self.dehydrate_storage_pool(pool)
- for pool in obj.storage_pools.all()
- ],
- "default_storage_pool": (
- obj.default_storage_pool.pool_id
- if obj.default_storage_pool
- else None
- ),
- "host": obj.host.system_id if obj.host else None,
- "numa_pinning": self.dehydrate_numa_pinning(obj),
- }
+ data["type"] = obj.power_type
+ data["total"] = self.dehydrate_total(obj)
+ data["used"] = self.dehydrate_used(obj)
+ data["available"] = self.dehydrate_available(obj)
+ data["composed_machines_count"] = obj.node_set.filter(
+ node_type=NODE_TYPE.MACHINE
+ ).count()
+ data["owners_count"] = (
+ obj.node_set.exclude(owner=None)
+ .values_list("owner")
+ .distinct()
+ .count()
)
+ data["hints"] = self.dehydrate_hints(obj.hints)
+ storage_pools = obj.storage_pools.all()
+ if len(storage_pools) > 0:
+ pools_data = []
+ for pool in storage_pools:
+ pools_data.append(self.dehydrate_storage_pool(pool))
+ data["storage_pools"] = pools_data
+ data["default_storage_pool"] = obj.default_storage_pool.pool_id
+ if obj.host is not None:
+ data["host"] = obj.host.system_id
+ else:
+ data["host"] = None
if not for_list:
- if obj.host:
+ if obj.host is not None:
data["attached_vlans"] = list(
obj.host.interface_set.all().values_list(
"vlan_id", flat=True
@@ -145,6 +141,8 @@ class PodHandler(TimestampedModelHandler):
data["attached_vlans"] = []
data["boot_vlans"] = []
+ data["numa_pinning"] = self.dehydrate_numa_pinning(obj)
+
if self.user.has_perm(PodPermission.compose, obj):
data["permissions"].append("compose")
diff --git a/src/maasserver/websockets/handlers/script.py b/src/maasserver/websockets/handlers/script.py
index c46a4f9..19db14c 100644
--- a/src/maasserver/websockets/handlers/script.py
+++ b/src/maasserver/websockets/handlers/script.py
@@ -1,14 +1,9 @@
-# Copyright 2017-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2017-2019 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""The Script handler for the WebSocket connection."""
-from maasserver.permissions import NodePermission
-from maasserver.websockets.base import (
- HandlerDoesNotExistError,
- HandlerPermissionError,
-)
from maasserver.websockets.handlers.timestampedmodel import (
TimestampedModelHandler,
)
@@ -19,33 +14,5 @@ class ScriptHandler(TimestampedModelHandler):
class Meta:
queryset = Script.objects.all()
pk = "id"
- allowed_methods = [
- "delete",
- "get_script",
- "list",
- ]
+ allowed_methods = ["list"]
listen_channels = ["script"]
-
- def delete(self, params):
- script = self.get_object(params)
- if not self.user.has_perm(NodePermission.admin) or script.default:
- raise HandlerPermissionError()
- script.delete()
-
- def get_script(self, params):
- id = params.get("id")
- revision = params.get("revision")
- script = self.get_object(params)
- if not script:
- raise HandlerDoesNotExistError(
- f"Script with id({id}) does not exist!"
- )
- if revision:
- for rev in script.script.previous_versions():
- if rev.id == revision:
- return rev.data
- raise HandlerDoesNotExistError(
- f"Unable to find revision {revision} for {script.name}."
- )
- else:
- return script.script.data
diff --git a/src/maasserver/websockets/handlers/tests/test_pod.py b/src/maasserver/websockets/handlers/tests/test_pod.py
index f4e857f..6137c38 100644
--- a/src/maasserver/websockets/handlers/tests/test_pod.py
+++ b/src/maasserver/websockets/handlers/tests/test_pod.py
@@ -14,7 +14,6 @@ from twisted.internet.defer import inlineCallbacks, succeed
from maasserver.enum import INTERFACE_TYPE
from maasserver.forms import pods
from maasserver.forms.pods import PodForm
-from maasserver.models import PodStoragePool
from maasserver.models.virtualmachine import MB, VirtualMachineInterface
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASTransactionServerTestCase
@@ -227,21 +226,6 @@ class TestPodHandler(MAASTransactionServerTestCase):
],
)
- def test_get_with_pod_host_no_storage_pools(self):
- admin = factory.make_admin()
- handler = PodHandler(admin, {}, None)
- node = factory.make_Node()
- pod = self.make_pod_with_hints(
- pod_type="lxd",
- host=node,
- )
- pod.default_storage_pool = None
- pod.save()
- PodStoragePool.objects.all().delete()
- result = handler.get({"id": pod.id})
- self.assertIsNone(result["default_storage_pool"])
- self.assertEqual(result["storage_pools"], [])
-
def test_get_host_interfaces_no_sriov(self):
admin = factory.make_admin()
handler = PodHandler(admin, {}, None)
diff --git a/src/maasserver/websockets/handlers/tests/test_script.py b/src/maasserver/websockets/handlers/tests/test_script.py
index 5c778fe..8ddf5cd 100644
--- a/src/maasserver/websockets/handlers/tests/test_script.py
+++ b/src/maasserver/websockets/handlers/tests/test_script.py
@@ -1,18 +1,12 @@
-# Copyright 2017-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2017-2019 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `maasserver.websockets.handlers.script`"""
-import random
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
-from maasserver.utils.orm import reload_object
-from maasserver.websockets.base import (
- dehydrate_datetime,
- HandlerDoesNotExistError,
- HandlerPermissionError,
-)
+from maasserver.websockets.base import dehydrate_datetime
from maasserver.websockets.handlers.script import ScriptHandler
@@ -58,77 +52,3 @@ class TestScriptHandler(MAASServerTestCase):
sorted_results = sorted(handler.list({}), key=lambda i: i["id"])
for expected, real in zip(expected_scripts, sorted_results):
self.assertDictEqual(expected, real)
-
- def test_delete(self):
- script = factory.make_Script()
- admin = factory.make_admin()
- handler = ScriptHandler(admin, {}, None)
-
- handler.delete({"id": script.id})
-
- self.assertIsNone(reload_object(script))
-
- def test_delete_admin_only(self):
- script = factory.make_Script()
- user = factory.make_User()
- handler = ScriptHandler(user, {}, None)
-
- self.assertRaises(
- HandlerPermissionError, handler.delete, {"id": script.id}
- )
-
- self.assertIsNotNone(reload_object(script))
-
- def test_delete_cannot_delete_default(self):
- script = factory.make_Script(default=True)
- admin = factory.make_admin()
- handler = ScriptHandler(admin, {}, None)
-
- self.assertRaises(
- HandlerPermissionError, handler.delete, {"id": script.id}
- )
-
- self.assertIsNotNone(reload_object(script))
-
- def test_get_script(self):
- script = factory.make_Script()
- user = factory.make_User()
- handler = ScriptHandler(user, {}, None)
-
- self.assertEqual(
- script.script.data, handler.get_script({"id": script.id})
- )
-
- def test_get_script_not_found(self):
- user = factory.make_User()
- handler = ScriptHandler(user, {}, None)
-
- self.assertRaises(
- HandlerDoesNotExistError,
- handler.get_script,
- {"id": random.randint(1000, 10000)},
- )
-
- def test_get_script_revision(self):
- script = factory.make_Script()
- old_vtf = script.script
- script.script = script.script.update(factory.make_string())
- script.save()
- user = factory.make_User()
- handler = ScriptHandler(user, {}, None)
-
- self.assertEqual(
- old_vtf.data,
- handler.get_script({"id": script.id, "revision": old_vtf.id}),
- )
-
- def test_get_script_revision_not_found(self):
- script = factory.make_Script()
- user = factory.make_User()
- handler = ScriptHandler(user, {}, None)
-
- self.assertRaises(
- HandlerDoesNotExistError,
- handler.get_script,
- {"id": script.id, "revision": random.randint(1000, 10000)},
- )
diff --git a/src/maasui/src b/src/maasui/src
index 12bbc73..f98002f 160000
--- a/src/maasui/src
+++ b/src/maasui/src
@@ -1 +1 @@
-Subproject commit 12bbc73d60dff95dda2b7a0eadc2a566f473e585
+Subproject commit f98002fcee9309001cef58734047f9a054f819f3
diff --git a/src/metadataserver/builtin_scripts/commissioning_scripts/bmc_config.py b/src/metadataserver/builtin_scripts/commissioning_scripts/bmc_config.py
index 4da0512..32138f4 100755
--- a/src/metadataserver/builtin_scripts/commissioning_scripts/bmc_config.py
+++ b/src/metadataserver/builtin_scripts/commissioning_scripts/bmc_config.py
@@ -5,7 +5,7 @@
# Author: Andres Rodriguez <andres.rodriguez@xxxxxxxxxxxxx>
# Lee Trager <lee.trager@xxxxxxxxxxxxx>
#
-# Copyright (C) 2013-2021 Canonical
+# Copyright (C) 2013-2020 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -80,13 +80,6 @@ import urllib
from paramiko.client import MissingHostKeyPolicy, SSHClient
import yaml
-# Most commands execute very quickly. A timeout is used to catch commands which
-# hang. Sometimes a hanging command can be handled, othertimes not. 3 minutes
-# is used as the timeout as some BMCs respond slowly when a large amount of
-# data is being returned. LP:1917652 was due to a slow responding BMC which
-# timed out when IPMI._bmc_get_config() was called.
-COMMAND_TIMEOUT = 60 * 3
-
def exit_skipped():
"""Write a result YAML indicating the test has been skipped."""
@@ -181,7 +174,7 @@ class IPMI(BMCConfig):
proc = run(
cmd,
stdout=PIPE,
- timeout=COMMAND_TIMEOUT,
+ timeout=60,
)
except Exception:
print(
@@ -222,7 +215,7 @@ class IPMI(BMCConfig):
"--commit",
"--key-pair=%s:%s=%s" % (section, key, value),
],
- timeout=COMMAND_TIMEOUT,
+ timeout=60,
)
# If the value was set update the cache.
if section not in self._bmc_config:
@@ -253,7 +246,7 @@ class IPMI(BMCConfig):
@staticmethod
@lru_cache(maxsize=1)
def _get_ipmi_locate_output():
- return check_output(["ipmi-locate"], timeout=COMMAND_TIMEOUT).decode()
+ return check_output(["ipmi-locate"], timeout=60).decode()
@staticmethod
@lru_cache(maxsize=1)
@@ -268,11 +261,13 @@ class IPMI(BMCConfig):
check_output(
["ipmitool", "lan", "print", i],
stderr=DEVNULL,
- timeout=COMMAND_TIMEOUT,
+ timeout=60,
).decode(),
)
except (CalledProcessError, TimeoutExpired):
pass
+ else:
+ return i
return -1, ""
def detected(self):
@@ -501,62 +496,166 @@ class IPMI(BMCConfig):
)
self._bmc_set_keys("Lan_Channel_Auth", ["SOL_Payload_Access"], "Yes")
- def _get_ipmitool_cipher_suite_ids(self):
- print(
- "INFO: Gathering supported cipher suites and current configuration..."
+ def _get_bmc_config_cipher_suite_ids(self):
+ """Return the supported IPMI cipher suite ids from bmc-config."""
+ cipher_suite_ids = {}
+ max_cipher_suite_id = 0
+ regex = re.compile(
+ r"^Maximum_Privilege_Cipher_Suite_Id_(?P<cipher_suite_id>1?\d)$"
)
- supported_cipher_suite_ids = []
- current_cipher_suite_privs = None
- _, output = self._get_ipmitool_lan_print()
+ for key, value in self._bmc_config.get(
+ "Rmcpplus_Conf_Privilege", {}
+ ).items():
+ m = regex.search(key)
+ if m:
+ cipher_suite_id = m.group("cipher_suite_id")
+ cipher_suite_ids[cipher_suite_id] = value
+ max_cipher_suite_id = max(
+ max_cipher_suite_id, int(cipher_suite_id)
+ )
+ return max_cipher_suite_id, cipher_suite_ids
- for line in output.splitlines():
- try:
- key, value = line.split(":", 1)
- except ValueError:
+ def _config_bmc_config_cipher_suite_ids(self, cipher_suite_ids):
+ # First find the most secure cipher suite id MAAS will use to
+ # communicate to the BMC with.
+ # 3 - HMAC-SHA1::HMAC-SHA1-96::AES-CBC-128
+ # 8 - HMAC-MD5::HMAC-MD5-128::AES-CBC-128
+ # 12 - HMAC-MD5::MD5-128::AES-CBC-128
+ # 17 - HMAC-SHA256::HMAC_SHA256_128::AES-CBC-128
+ # This is not in order as MAAS prefers to use the most secure cipher
+ # available.
+ for cipher_suite_id in ["17", "3", "8", "12"]:
+ if cipher_suite_id not in cipher_suite_ids:
continue
+ elif cipher_suite_ids[cipher_suite_id] != "Administrator":
+ print(
+ 'INFO: Enabling IPMI cipher suite id "%s" '
+ "for MAAS use..." % cipher_suite_id
+ )
+ try:
+ self._bmc_set(
+ "Rmcpplus_Conf_Privilege",
+ "Maximum_Privilege_Cipher_Suite_Id_%s"
+ % cipher_suite_id,
+ "Administrator",
+ )
+ except (CalledProcessError, TimeoutExpired):
+ # Some machines will show what ciphers are available
+ # but not allow their value to be changed. The ARM64
+ # machine in the MAAS CI is like this.
+ print(
+ "WARNING: Unable to enable secure IPMI cipher "
+ 'suite id "%s"!' % cipher_suite_id
+ )
+ # Try the next secure cipher
+ continue
+ self._cipher_suite_id = cipher_suite_id
+ # Enable the most secure cipher suite id and leave the
+ # other secure cipher suite ids in their current state.
+ # Most IPMI tools, such as freeipmi-tools, use cipher
+ # suite id 3 as its default. If the user has 3 enabled
+ # while 17 is available we want to keep 3 in the same
+ # state to not break other tools.
+ break
+
+ # Disable insecure IPMI cipher suites.
+ for cipher_suite_id, state in cipher_suite_ids.items():
+ if cipher_suite_id in ["17", "3", "8", "12"]:
+ continue
+ elif state != "Unused":
+ print(
+ 'INFO: Disabling insecure IPMI cipher suite id "%s"'
+ % cipher_suite_id
+ )
+ try:
+ self._bmc_set(
+ "Rmcpplus_Conf_Privilege",
+ "Maximum_Privilege_Cipher_Suite_Id_%s"
+ % cipher_suite_id,
+ "Unused",
+ )
+ except (CalledProcessError, TimeoutExpired):
+ # Some machines will show what ciphers are available
+ # but not allow their value to be changed. The ARM64
+ # machine in the MAAS CI is like this.
+ print(
+ "WARNING: Unable to disable insecure IPMI cipher "
+ 'suite id "%s"!' % cipher_suite_id
+ )
+
+ def _get_ipmitool_cipher_suite_ids(self):
+ supported_cipher_suite_ids = None
+ cipher_suite_privs = None
+ _, output = self._get_ipmitool_lan_print()
+ for line in output.splitlines():
+ key, value = line.split(":", maxsplit=1)
key = key.strip()
value = value.strip()
if key == "RMCP+ Cipher Suites":
try:
- # Some BMCs return an unordered list.
- supported_cipher_suite_ids = sorted(
- [int(i) for i in value.split(",")]
- )
+ supported_cipher_suite_ids = [
+ int(i) for i in value.split(",")
+ ]
except ValueError:
- print(
- "ERROR: ipmitool returned RMCP+ Cipher Suites with "
- "invalid characters: %s" % value,
- file=sys.stderr,
- )
- return [], None
+ return 0, [], ""
elif key == "Cipher Suite Priv Max":
- current_cipher_suite_privs = value
- if supported_cipher_suite_ids and current_cipher_suite_privs:
- break
+ cipher_suite_privs = value
- return supported_cipher_suite_ids, current_cipher_suite_privs
+ if supported_cipher_suite_ids and cipher_suite_privs:
+ return (
+ max(
+ [
+ i
+ for i in supported_cipher_suite_ids
+ if i in [17, 3, 8, 12]
+ ]
+ ),
+ supported_cipher_suite_ids,
+ cipher_suite_privs,
+ )
+ else:
+ return 0, [], ""
- def _configure_ipmitool_cipher_suite_ids(
- self, cipher_suite_id, current_suite_privs
+ def _config_ipmitool_cipher_suite_ids(
+ self,
+ max_cipher_suite_id,
+ supported_cipher_suite_ids,
+ cipher_suite_privs,
):
new_cipher_suite_privs = ""
- for i, c in enumerate(current_suite_privs):
- if i == cipher_suite_id and c != "a":
- print(
- "INFO: Enabling cipher suite %s for MAAS use..."
- % cipher_suite_id
- )
- new_cipher_suite_privs += "a"
- elif i not in [17, 3, 8, 12] and c != "X":
- print("INFO: Disabling insecure cipher suite %s..." % i)
- new_cipher_suite_privs += "X"
+ for i, v in enumerate(cipher_suite_privs):
+ if i < len(supported_cipher_suite_ids):
+ cipher_suite_id = supported_cipher_suite_ids[i]
+ if cipher_suite_id in [17, 3, 8, 12]:
+ if cipher_suite_id == max_cipher_suite_id and v != "a":
+ print(
+ 'INFO: Enabling IPMI cipher suite id "%s" '
+ "for MAAS use..." % cipher_suite_id
+ )
+ new_cipher_suite_privs += "a"
+ else:
+ new_cipher_suite_privs += v
+ else:
+ if v != "X":
+ print(
+ "INFO: Disabling insecure IPMI cipher suite id "
+ '"%s"' % cipher_suite_id
+ )
+ new_cipher_suite_privs = "X"
else:
- # Leave secure ciphers as is. Most tools default to 3 while
- # 17 is considered the most secure.
- new_cipher_suite_privs += c
+ # 15 characters are usually given even if there
+ # aren't 15 ciphers supported. Copy the current value
+                # in case there is some OEM use for them.
+ new_cipher_suite_privs += v
+
+ if cipher_suite_privs == new_cipher_suite_privs:
+ # Cipher suites are already properly configured, nothing
+ # to do.
+ self._cipher_suite_id = str(max_cipher_suite_id)
+ return
- if new_cipher_suite_privs != current_suite_privs:
- channel, _ = self._get_ipmitool_lan_print()
+ channel, _ = self._get_ipmitool_lan_print()
+ try:
check_call(
[
"ipmitool",
@@ -566,69 +665,56 @@ class IPMI(BMCConfig):
"cipher_privs",
new_cipher_suite_privs,
],
- timeout=COMMAND_TIMEOUT,
+ timeout=60,
+ )
+ except (CalledProcessError, TimeoutExpired):
+ print(
+ "WARNING: Unable to configure IPMI cipher suites with "
+ "ipmitool!"
)
- return new_cipher_suite_privs
+ else:
+ self._cipher_suite_id = str(max_cipher_suite_id)
def _config_cipher_suite_id(self):
print("INFO: Configuring IPMI cipher suite ids...")
+ # BMC firmware can be buggy and different tools surface these bugs
+ # in different ways. bmc-config works on all machines in the MAAS
+ # CI while ipmitool doesn't detect anything on the ARM64 machine.
+ # However a user has reported that ipmitool detects cipher 17 on
+ # his system while bmc-config doesn't. To make sure MAAS uses the
+ # most secure cipher suite id check both.
+ # https://discourse.maas.io/t/ipmi-cipher-suite-c17-support/3293/11
+
(
- supported_cipher_suite_ids,
- current_cipher_suite_privs,
+ bmc_config_max,
+ bmc_config_ids,
+ ) = self._get_bmc_config_cipher_suite_ids()
+ (
+ ipmitool_max,
+ ipmitool_ids,
+ ipmitool_privs,
) = self._get_ipmitool_cipher_suite_ids()
- print(
- "INFO: BMC supports the following ciphers - %s"
- % supported_cipher_suite_ids
- )
- # First find the most secure cipher suite id MAAS will use to
- # communicate to the BMC with.
- # 3 - HMAC-SHA1::HMAC-SHA1-96::AES-CBC-128
- # 8 - HMAC-MD5::HMAC-MD5-128::AES-CBC-128
- # 12 - HMAC-MD5::MD5-128::AES-CBC-128
- # 17 - HMAC-SHA256::HMAC_SHA256_128::AES-CBC-128
- # This is not in order as MAAS prefers to use the most secure cipher
- # available.
- cipher_suite_id = None
- for i in [17, 3, 8, 12]:
- if i in supported_cipher_suite_ids:
- cipher_suite_id = i
- break
- if cipher_suite_id is None:
- # Some BMC's don't allow this to be viewed or configured, such
- # as the PPC64 machine in the MAAS CI.
- print(
- "WARNING: No IPMI supported cipher suite found! "
- "MAAS will use freeipmi-tools default."
+ if bmc_config_max >= ipmitool_max:
+ self._config_bmc_config_cipher_suite_ids(bmc_config_ids)
+ else:
+ self._config_ipmitool_cipher_suite_ids(
+ ipmitool_max, ipmitool_ids, ipmitool_privs
)
- return
- print(
- "INFO: Current cipher suite configuration - %s"
- % current_cipher_suite_privs
- )
- try:
- new_cipher_suite_privs = self._configure_ipmitool_cipher_suite_ids(
- cipher_suite_id, current_cipher_suite_privs
+ if self._cipher_suite_id:
+ print(
+ 'INFO: MAAS will use IPMI cipher suite id "%s" for '
+ "BMC communication" % self._cipher_suite_id
)
- except (CalledProcessError, TimeoutExpired):
+ else:
# Some BMC's don't allow this to be viewed or configured, such
# as the PPC64 machine in the MAAS CI.
print(
- "WARNING: Unable to configure IPMI cipher suites! "
+ "WARNING: No IPMI cipher suite found! "
"MAAS will use freeipmi-tools default."
)
- else:
- print(
- "INFO: New cipher suite configuration - %s"
- % new_cipher_suite_privs
- )
- print(
- 'INFO: MAAS will use IPMI cipher suite id "%s" for '
- "BMC communication" % cipher_suite_id
- )
- self._cipher_suite_id = str(cipher_suite_id)
def _config_kg(self):
if self._kg:
@@ -801,9 +887,7 @@ class HPMoonshot(BMCConfig):
def detected(self):
try:
output = check_output(
- ["ipmitool", "raw", "06", "01"],
- timeout=COMMAND_TIMEOUT,
- stderr=DEVNULL,
+ ["ipmitool", "raw", "06", "01"], timeout=60, stderr=DEVNULL
).decode()
except Exception:
return False
@@ -815,7 +899,7 @@ class HPMoonshot(BMCConfig):
def _get_local_address(self):
output = check_output(
- ["ipmitool", "raw", "0x2c", "1", "0"], timeout=COMMAND_TIMEOUT
+ ["ipmitool", "raw", "0x2c", "1", "0"], timeout=60
).decode()
return "0x%s" % output.split()[2]
@@ -834,7 +918,7 @@ class HPMoonshot(BMCConfig):
"1",
"0",
],
- timeout=COMMAND_TIMEOUT,
+ timeout=60,
).decode()
return "0x%s" % output.split()[2]
@@ -867,7 +951,7 @@ class HPMoonshot(BMCConfig):
"print",
"2",
],
- timeout=COMMAND_TIMEOUT,
+ timeout=60,
).decode()
m = re.search(
r"IP Address\s+:\s+"
@@ -897,7 +981,7 @@ class HPMoonshot(BMCConfig):
"mcloc",
"-v",
],
- timeout=COMMAND_TIMEOUT,
+ timeout=60,
).decode()
local_chan = self._get_channel_number(local_address, output)
cartridge_chan = self._get_channel_number(node_address, output)
@@ -941,14 +1025,13 @@ class Wedge(BMCConfig):
# XXX ltrager 2020-09-16 - It would be better to get these values from
# /sys but no test system is available.
sys_manufacturer = check_output(
- ["dmidecode", "-s", "system-manufacturer"], timeout=COMMAND_TIMEOUT
+ ["dmidecode", "-s", "system-manufacturer"], timeout=60
).decode()
prod_name = check_output(
- ["dmidecode", "-s", "system-product-name"], timeout=COMMAND_TIMEOUT
+ ["dmidecode", "-s", "system-product-name"], timeout=60
).decode()
baseboard_prod_name = check_output(
- ["dmidecode", "-s", "baseboard-product-name"],
- timeout=COMMAND_TIMEOUT,
+ ["dmidecode", "-s", "baseboard-product-name"], timeout=60
).decode()
if (
(sys_manufacturer == "Intel" and prod_name == "EPGSVR")
@@ -971,8 +1054,7 @@ class Wedge(BMCConfig):
# "fe80::ff:fe00:2" is the address for the device to the internal
# BMC network.
output = check_output(
- ["ip", "-o", "a", "show", "to", "fe80::ff:fe00:2"],
- timeout=COMMAND_TIMEOUT,
+ ["ip", "-o", "a", "show", "to", "fe80::ff:fe00:2"], timeout=60
).decode()
# fe80::1 is the BMC's LLA.
return "fe80::1%%%s" % output.split()[1]
@@ -981,13 +1063,8 @@ class Wedge(BMCConfig):
def detected(self):
# First detect this is a known switch
- try:
- switch_type = self._detect_known_switch()
- except (CalledProcessError, TimeoutExpired, FileNotFoundError):
+ if not self._detect_known_switch():
return False
- else:
- if switch_type is None:
- return False
try:
# Second, lets verify if this is a known endpoint
# First try to hit the API. This would work on Wedge 100.
@@ -1014,7 +1091,7 @@ class Wedge(BMCConfig):
password=self.password,
)
_, stdout, _ = client.exec_command(
- "ip -o -4 addr show", timeout=COMMAND_TIMEOUT
+ "ip -o -4 addr show", timeout=60
)
return (
stdout.read().decode().splitlines()[1].split()[3].split("/")[0]
@@ -1114,7 +1191,7 @@ def main():
# XXX: andreserl 2013-04-09 bug=1064527: Try to detect if node
# is a Virtual Machine. If it is, do not try to detect IPMI.
try:
- check_call(["systemd-detect-virt", "-q"], timeout=COMMAND_TIMEOUT)
+ check_call(["systemd-detect-virt", "-q"], timeout=60)
except CalledProcessError:
pass
else:
@@ -1131,11 +1208,11 @@ def main():
# The IPMI modules will fail to load if loaded on unsupported
# hardware.
try:
- run(["sudo", "-E", "modprobe", module], timeout=COMMAND_TIMEOUT)
+ run(["sudo", "-E", "modprobe", module], timeout=60)
except TimeoutExpired:
pass
try:
- run(["sudo", "-E", "udevadm", "settle"], timeout=COMMAND_TIMEOUT)
+ run(["sudo", "-E", "udevadm", "settle"], timeout=60)
except TimeoutExpired:
pass
detect_and_configure(args, bmc_config_path)
diff --git a/src/metadataserver/builtin_scripts/commissioning_scripts/tests/test_bmc_config.py b/src/metadataserver/builtin_scripts/commissioning_scripts/tests/test_bmc_config.py
index 7fb5cc5..b74a6c0 100644
--- a/src/metadataserver/builtin_scripts/commissioning_scripts/tests/test_bmc_config.py
+++ b/src/metadataserver/builtin_scripts/commissioning_scripts/tests/test_bmc_config.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2020 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test bmc_config functions."""
@@ -167,7 +167,7 @@ EndSection
"--commit",
f"--key-pair={section}:{key}={value}",
],
- timeout=bmc_config.COMMAND_TIMEOUT,
+ timeout=60,
),
)
self.assertEqual({section: {key: value}}, self.ipmi._bmc_config)
@@ -201,7 +201,7 @@ EndSection
"--commit",
"--key-pair=User2:SOL_Payload_Access=Yes",
],
- timeout=bmc_config.COMMAND_TIMEOUT,
+ timeout=60,
),
)
# Verify cache has been updated
@@ -248,9 +248,7 @@ EndSection
self.assertEqual(ret, self.ipmi._get_ipmi_locate_output())
self.assertThat(
self.mock_check_output,
- MockCalledOnceWith(
- ["ipmi-locate"], timeout=bmc_config.COMMAND_TIMEOUT
- ),
+ MockCalledOnceWith(["ipmi-locate"], timeout=60),
)
def test_get_ipmitool_lan_print(self):
@@ -273,17 +271,17 @@ EndSection
call(
["ipmitool", "lan", "print", "0"],
stderr=DEVNULL,
- timeout=bmc_config.COMMAND_TIMEOUT,
+ timeout=60,
),
call(
["ipmitool", "lan", "print", "1"],
stderr=DEVNULL,
- timeout=bmc_config.COMMAND_TIMEOUT,
+ timeout=60,
),
call(
["ipmitool", "lan", "print", "2"],
stderr=DEVNULL,
- timeout=bmc_config.COMMAND_TIMEOUT,
+ timeout=60,
),
),
)
@@ -643,159 +641,178 @@ EndSection
self.assertThat(mock_bmc_set_keys, MockNotCalled())
- def test_get_ipmitool_cipher_suite_ids(self):
- supported_cipher_suite_ids = [
- i for i in range(0, 20) if factory.pick_bool()
- ]
- cipher_suite_privs = "".join(
- [
- random.choice(["X", "c", "u", "o", "a", "O"])
- for _ in range(0, 16)
- ]
- )
- ipmitool_output = (
- # Validate bmc-config ignores lines which are not key value
- # pairs.
- factory.make_string()
- + "\n"
- # Validate bmc-config ignores unknown key value pairs.
- + factory.make_string()
- + " : "
- + factory.make_string()
- + "\n"
- + "RMCP+ Cipher Suites : "
- + ",".join([str(i) for i in supported_cipher_suite_ids])
- + "\n"
- + "Cipher Suite Priv Max : "
- + cipher_suite_privs
- + "\n"
- + factory.make_string()
- + " : "
- + factory.make_string()
- + "\n"
- )
- self.patch(self.ipmi, "_get_ipmitool_lan_print").return_value = (
- random.randint(0, 10),
- ipmitool_output,
- )
+ def test_get_bmc_config_cipher_suite_ids(self):
+ self.ipmi._bmc_config = {
+ "Rmcpplus_Conf_Privilege": {
+ "Maximum_Privilege_Cipher_Suite_Id_0": "Administrator",
+ "Maximum_Privilege_Cipher_Suite_Id_17": "Unused",
+ "Maximum_Privilege_Cipher_Suite_Id_3": "Unused",
+ "Maximum_Privilege_Cipher_Suite_Id_42": "Unused",
+ "foo": "bar",
+ }
+ }
(
- detected_cipher_suite_ids,
- detected_cipher_suite_privs,
- ) = self.ipmi._get_ipmitool_cipher_suite_ids()
+ max_cipher_suite_id,
+ cipher_suite_ids,
+ ) = self.ipmi._get_bmc_config_cipher_suite_ids()
+ self.assertEqual(17, max_cipher_suite_id)
self.assertEqual(
- supported_cipher_suite_ids,
- detected_cipher_suite_ids,
- ipmitool_output,
+ {
+ "0": "Administrator",
+ "17": "Unused",
+ "3": "Unused",
+ },
+ cipher_suite_ids,
)
- self.assertEqual(
- cipher_suite_privs, detected_cipher_suite_privs, ipmitool_output
+
+ def test_config_bmc_config_cipher_suite_ids(self):
+ mock_bmc_set = self.patch(self.ipmi, "_bmc_set")
+ cipher_suite_ids = {
+ "3": "Unused",
+ "17": "Unused",
+ "0": "Administrator",
+ }
+
+ self.ipmi._config_bmc_config_cipher_suite_ids(cipher_suite_ids)
+
+ self.assertThat(
+ mock_bmc_set,
+ MockCallsMatch(
+ call(
+ "Rmcpplus_Conf_Privilege",
+ "Maximum_Privilege_Cipher_Suite_Id_17",
+ "Administrator",
+ ),
+ call(
+ "Rmcpplus_Conf_Privilege",
+ "Maximum_Privilege_Cipher_Suite_Id_0",
+ "Unused",
+ ),
+ ),
)
+ self.assertEqual("17", self.ipmi._cipher_suite_id)
+
+ def test_config_bmc_config_cipher_suite_ids_does_nothing(self):
+ mock_bmc_set = self.patch(self.ipmi, "_bmc_set")
+ cipher_suite_ids = {
+ "0": "Unused",
+ "3": "Unused",
+ "17": "Administrator",
+ }
+
+ self.ipmi._config_bmc_config_cipher_suite_ids(cipher_suite_ids)
+
+ self.assertThat(mock_bmc_set, MockNotCalled())
+ self.assertEqual("17", self.ipmi._cipher_suite_id)
- def test_get_ipmitool_cipher_suite_ids_ignores_bad_data(self):
- self.patch(self.ipmi, "_get_ipmitool_lan_print").return_value = (
- random.randint(0, 10),
- "RMCP+ Cipher Suites : abc\n",
+ def test_config_bmc_config_cipher_suite_ids_ignores_failures(self):
+ mock_bmc_set = self.patch(self.ipmi, "_bmc_set")
+ mock_bmc_set.side_effect = random.choice(
+ [
+ CalledProcessError(
+ cmd="cmd", returncode=random.randint(1, 255)
+ ),
+ TimeoutExpired(cmd="cmd", timeout=random.randint(1, 100)),
+ ]
)
+ cipher_suite_ids = {
+ "3": "Unused",
+ "17": "Unused",
+ "0": "Administrator",
+ }
- (
- detected_cipher_suite_ids,
- detected_cipher_suite_privs,
- ) = self.ipmi._get_ipmitool_cipher_suite_ids()
+ self.ipmi._config_bmc_config_cipher_suite_ids(cipher_suite_ids)
- self.assertEqual([], detected_cipher_suite_ids)
- self.assertIsNone(detected_cipher_suite_privs)
+ self.assertEqual("", self.ipmi._cipher_suite_id)
- def test_get_ipmitool_cipher_suite_ids_returns_none_when_not_found(self):
- self.patch(self.ipmi, "_get_ipmitool_lan_print").return_value = (
- random.randint(0, 10),
- factory.make_string() + " : " + factory.make_string() + "\n",
+ def test_get_ipmitool_cipher_suite_ids(self):
+ mock_get_ipmitool_lan_print = self.patch(
+ self.ipmi, "_get_ipmitool_lan_print"
+ )
+ mock_get_ipmitool_lan_print.return_value = (
+ "2",
+ "RMCP+ Cipher Suites : 0,3,17\n"
+ "Cipher Suite Priv Max : aXXXXXXXXXXXXXX\n",
)
(
- detected_cipher_suite_ids,
- detected_cipher_suite_privs,
+ max_cipher_suite_id,
+ supported_cipher_suite_ids,
+ cipher_suite_privs,
) = self.ipmi._get_ipmitool_cipher_suite_ids()
- self.assertEqual([], detected_cipher_suite_ids)
- self.assertIsNone(detected_cipher_suite_privs)
+ self.assertEqual(17, max_cipher_suite_id)
+ self.assertEqual([0, 3, 17], supported_cipher_suite_ids)
+ self.assertEqual("aXXXXXXXXXXXXXX", cipher_suite_privs)
- def test_configure_ipmitool_cipher_suite_ids(self):
- channel = random.randint(0, 10)
- self.patch(self.ipmi, "_get_ipmitool_lan_print").return_value = (
- channel,
- "",
+ def test_get_ipmitool_cipher_suite_ids_ignores_invalid(self):
+ mock_get_ipmitool_lan_print = self.patch(
+ self.ipmi, "_get_ipmitool_lan_print"
)
-
- new_cipher_suite_privs = (
- self.ipmi._configure_ipmitool_cipher_suite_ids(
- 3, "aaaXaaaaaaaaaaaaa"
- )
+ mock_get_ipmitool_lan_print.return_value = (
+ "2",
+ "RMCP+ Cipher Suites : Not Available\n"
+ "Cipher Suite Priv Max : Not Available\n",
)
- self.assertEqual("XXXaXXXXaXXXaXXXX", new_cipher_suite_privs)
- self.mock_check_call.assert_called_once_with(
- [
- "ipmitool",
- "lan",
- "set",
- channel,
- "cipher_privs",
- "XXXaXXXXaXXXaXXXX",
- ],
- timeout=bmc_config.COMMAND_TIMEOUT,
- )
+ (
+ max_cipher_suite_id,
+ supported_cipher_suite_ids,
+ cipher_suite_privs,
+ ) = self.ipmi._get_ipmitool_cipher_suite_ids()
- def test_configure_ipmitool_cipher_suite_ids_does_nothing_when_set(self):
- channel = random.randint(0, 10)
- self.patch(self.ipmi, "_get_ipmitool_lan_print").return_value = (
- channel,
- "",
- )
+ self.assertEqual(0, max_cipher_suite_id)
+ self.assertEqual([], supported_cipher_suite_ids)
+ self.assertEqual("", cipher_suite_privs)
- new_cipher_suite_privs = (
- self.ipmi._configure_ipmitool_cipher_suite_ids(
- 3, "XXXaXXXXXXXXXXXX"
- )
+ def test_config_ipmitool_cipher_suite_ids(self):
+ mock_get_ipmitool_lan_print = self.patch(
+ self.ipmi, "_get_ipmitool_lan_print"
+ )
+ mock_get_ipmitool_lan_print.return_value = (
+ "2",
+ factory.make_name("output"),
)
- self.assertEqual("XXXaXXXXXXXXXXXX", new_cipher_suite_privs)
- self.mock_check_call.assert_not_called()
+ self.ipmi._config_ipmitool_cipher_suite_ids(
+ 17, [0, 3, 17], "aXXXXXXXXXXXXXX"
+ )
- def test_config_cipher_suite_id(self):
- self.patch(self.ipmi, "_get_ipmitool_lan_print").return_value = (
- random.randint(0, 10),
- (
- "RMCP+ Cipher Suites : 0,3,17\n"
- + "Cipher Suite Priv Max : XXXaXXXXXXXXXXXX\n"
+ self.assertThat(
+ self.mock_check_call,
+ MockCalledOnceWith(
+ [
+ "ipmitool",
+ "lan",
+ "set",
+ "2",
+ "cipher_privs",
+ "XXaXXXXXXXXXXXX",
+ ],
+ timeout=60,
),
)
-
- self.ipmi._config_cipher_suite_id()
-
self.assertEqual("17", self.ipmi._cipher_suite_id)
- def test_config_cipher_suite_id_does_nothing_if_not_detected(self):
- self.patch(self.ipmi, "_get_ipmitool_lan_print").return_value = (
- random.randint(0, 10),
- "",
+ def test_config_ipmitool_cipher_suite_ids_does_nothing(self):
+ mock_get_ipmitool_lan_print = self.patch(
+ self.ipmi, "_get_ipmitool_lan_print"
+ )
+ mock_get_ipmitool_lan_print.return_value = (
+ "2",
+ factory.make_name("output"),
)
- self.ipmi._config_cipher_suite_id()
+ self.ipmi._config_ipmitool_cipher_suite_ids(
+ 17, [0, 3, 17], "XXaXXXXXXXXXXXX"
+ )
- self.mock_check_call.assert_not_called()
- self.assertEqual("", self.ipmi._cipher_suite_id)
+ self.assertThat(self.mock_check_call, MockNotCalled())
+ self.assertEqual("17", self.ipmi._cipher_suite_id)
- def test_config_cipher_suite_id_doesnt_set_id_on_error(self):
- channel = random.randint(0, 10)
- self.patch(self.ipmi, "_get_ipmitool_lan_print").return_value = (
- channel,
- (
- "RMCP+ Cipher Suites : 0,3\n"
- + "Cipher Suite Priv Max : aXXXXXXXXXXXXXXX\n"
- ),
- )
+ def test_config_ipmitool_cipher_suite_ids_ignores_errors(self):
self.mock_check_call.side_effect = random.choice(
[
CalledProcessError(
@@ -804,22 +821,101 @@ EndSection
TimeoutExpired(cmd="cmd", timeout=random.randint(1, 100)),
]
)
+ mock_get_ipmitool_lan_print = self.patch(
+ self.ipmi, "_get_ipmitool_lan_print"
+ )
+ mock_get_ipmitool_lan_print.return_value = (
+ "2",
+ factory.make_name("output"),
+ )
- self.ipmi._config_cipher_suite_id()
+ self.ipmi._config_ipmitool_cipher_suite_ids(
+ 17, [0, 3, 17], "aXXXXXXXXXXXXXX"
+ )
- self.mock_check_call.assert_called_once_with(
- [
- "ipmitool",
- "lan",
- "set",
- channel,
- "cipher_privs",
- "XXXaXXXXXXXXXXXX",
- ],
- timeout=bmc_config.COMMAND_TIMEOUT,
+ self.assertThat(
+ self.mock_check_call,
+ MockCalledOnceWith(
+ [
+ "ipmitool",
+ "lan",
+ "set",
+ "2",
+ "cipher_privs",
+ "XXaXXXXXXXXXXXX",
+ ],
+ timeout=60,
+ ),
)
self.assertEqual("", self.ipmi._cipher_suite_id)
+ def test_config_cipher_suite_id_bmc_config(self):
+ self.ipmi._bmc_config = {
+ "Rmcpplus_Conf_Privilege": {
+ "Maximum_Privilege_Cipher_Suite_Id_0": "Administrator",
+ "Maximum_Privilege_Cipher_Suite_Id_3": "Unused",
+ "Maximum_Privilege_Cipher_Suite_Id_17": "Unused",
+ }
+ }
+ mock_get_ipmitool_lan_print = self.patch(
+ self.ipmi, "_get_ipmitool_lan_print"
+ )
+ mock_get_ipmitool_lan_print.return_value = (
+ "2",
+ "RMCP+ Cipher Suites : 0,3,17\n"
+ "Cipher Suite Priv Max : aXXXXXXXXXXXXXX\n",
+ )
+ mock_config_bmc_config_cipher_suite_ids = self.patch(
+ self.ipmi, "_config_bmc_config_cipher_suite_ids"
+ )
+ mock_config_ipmitool_cipher_suite_ids = self.patch(
+ self.ipmi, "_config_ipmitool_cipher_suite_ids"
+ )
+
+ self.ipmi._config_cipher_suite_id()
+
+ self.assertThat(
+ mock_config_bmc_config_cipher_suite_ids,
+ MockCalledOnceWith(
+ {"0": "Administrator", "3": "Unused", "17": "Unused"}
+ ),
+ )
+ self.assertThat(mock_config_ipmitool_cipher_suite_ids, MockNotCalled())
+
+ def test_config_cipher_suite_id_ipmitool(self):
+ # Regression test for
+ # https://discourse.maas.io/t/ipmi-cipher-suite-c17-support/3293/11
+ self.ipmi._bmc_config = {
+ "Rmcpplus_Conf_Privilege": {
+ "Maximum_Privilege_Cipher_Suite_Id_0": "Administrator",
+ "Maximum_Privilege_Cipher_Suite_Id_3": "Unused",
+ }
+ }
+ mock_get_ipmitool_lan_print = self.patch(
+ self.ipmi, "_get_ipmitool_lan_print"
+ )
+ mock_get_ipmitool_lan_print.return_value = (
+ "2",
+ "RMCP+ Cipher Suites : 0,3,17\n"
+ "Cipher Suite Priv Max : aXXXXXXXXXXXXXX\n",
+ )
+ mock_config_bmc_config_cipher_suite_ids = self.patch(
+ self.ipmi, "_config_bmc_config_cipher_suite_ids"
+ )
+ mock_config_ipmitool_cipher_suite_ids = self.patch(
+ self.ipmi, "_config_ipmitool_cipher_suite_ids"
+ )
+
+ self.ipmi._config_cipher_suite_id()
+
+ self.assertThat(
+ mock_config_bmc_config_cipher_suite_ids, MockNotCalled()
+ )
+ self.assertThat(
+ mock_config_ipmitool_cipher_suite_ids,
+ MockCalledOnceWith(17, [0, 3, 17], "aXXXXXXXXXXXXXX"),
+ )
+
def test_config_kg_set(self):
mock_bmc_set = self.patch(self.ipmi, "_bmc_set")
kg = factory.make_name("kg")
@@ -1134,9 +1230,7 @@ class TestHPMoonshot(MAASTestCase):
self.assertThat(
self.mock_check_output,
MockCalledOnceWith(
- ["ipmitool", "raw", "06", "01"],
- timeout=bmc_config.COMMAND_TIMEOUT,
- stderr=DEVNULL,
+ ["ipmitool", "raw", "06", "01"], timeout=60, stderr=DEVNULL
),
)
@@ -1154,8 +1248,7 @@ class TestHPMoonshot(MAASTestCase):
self.assertThat(
self.mock_check_output,
MockCalledOnceWith(
- ["ipmitool", "raw", "0x2c", "1", "0"],
- timeout=bmc_config.COMMAND_TIMEOUT,
+ ["ipmitool", "raw", "0x2c", "1", "0"], timeout=60
),
)
@@ -1184,7 +1277,7 @@ class TestHPMoonshot(MAASTestCase):
"print",
"2",
],
- timeout=bmc_config.COMMAND_TIMEOUT,
+ timeout=60,
),
)
@@ -1212,7 +1305,7 @@ class TestHPMoonshot(MAASTestCase):
"print",
"2",
],
- timeout=bmc_config.COMMAND_TIMEOUT,
+ timeout=60,
),
)
@@ -1281,7 +1374,7 @@ class TestHPMoonshot(MAASTestCase):
"mcloc",
"-v",
],
- timeout=bmc_config.COMMAND_TIMEOUT,
+ timeout=60,
),
)
self.assertThat(
@@ -1342,20 +1435,6 @@ class TestWedge(MAASTestCase):
self.patch(self.wedge, "_detect_known_switch").return_value = None
self.assertFalse(self.wedge.detected())
- def test_detected_dmidecode_error(self):
- self.patch(
- self.wedge, "_detect_known_switch"
- ).side_effect = random.choice(
- [
- CalledProcessError(
- cmd="cmd", returncode=random.randint(1, 255)
- ),
- TimeoutExpired(cmd="cmd", timeout=random.randint(1, 100)),
- FileNotFoundError(),
- ]
- )
- self.assertFalse(self.wedge.detected())
-
def test_detected_rest_api(self):
self.patch(self.wedge, "_detect_known_switch").return_value = "accton"
mock_urlopen = self.patch(bmc_config.urllib.request, "urlopen")
@@ -1549,34 +1628,18 @@ class TestMain(MAASTestCase):
self.assertThat(
self.mock_check_call,
- MockCalledOnceWith(
- ["systemd-detect-virt", "-q"],
- timeout=bmc_config.COMMAND_TIMEOUT,
- ),
+ MockCalledOnceWith(["systemd-detect-virt", "-q"], timeout=60),
)
self.assertThat(
mock_run,
MockCallsMatch(
call(
- ["sudo", "-E", "modprobe", "ipmi_msghandler"],
- timeout=bmc_config.COMMAND_TIMEOUT,
- ),
- call(
- ["sudo", "-E", "modprobe", "ipmi_devintf"],
- timeout=bmc_config.COMMAND_TIMEOUT,
- ),
- call(
- ["sudo", "-E", "modprobe", "ipmi_si"],
- timeout=bmc_config.COMMAND_TIMEOUT,
- ),
- call(
- ["sudo", "-E", "modprobe", "ipmi_ssif"],
- timeout=bmc_config.COMMAND_TIMEOUT,
- ),
- call(
- ["sudo", "-E", "udevadm", "settle"],
- timeout=bmc_config.COMMAND_TIMEOUT,
+ ["sudo", "-E", "modprobe", "ipmi_msghandler"], timeout=60
),
+ call(["sudo", "-E", "modprobe", "ipmi_devintf"], timeout=60),
+ call(["sudo", "-E", "modprobe", "ipmi_si"], timeout=60),
+ call(["sudo", "-E", "modprobe", "ipmi_ssif"], timeout=60),
+ call(["sudo", "-E", "udevadm", "settle"], timeout=60),
),
)
self.assertThat(mock_detect_and_configure, MockCalledOnce())
diff --git a/src/metadataserver/builtin_scripts/hooks.py b/src/metadataserver/builtin_scripts/hooks.py
index d89dc75..96d7d12 100644
--- a/src/metadataserver/builtin_scripts/hooks.py
+++ b/src/metadataserver/builtin_scripts/hooks.py
@@ -199,12 +199,11 @@ def parse_interfaces_details(node):
return _parse_interfaces(node, details)
-def update_interface_details(interface, details, numa_ids_map):
+def update_interface_details(interface, details):
"""Update details for an existing interface from commissioning data.
- :params details: details from the _parse_interfaces call
- :params numa_ids_map: dict mapping numa node indexes to their IDs for the
- node the interface belongs to
+ This should be passed details from the _parse_interfaces call.
+
"""
iface_details = details.get(interface.mac_address)
if not iface_details:
@@ -224,19 +223,10 @@ def update_interface_details(interface, details, numa_ids_map):
setattr(interface, field, value)
update_fields.append(field)
- numa_node_idx = iface_details["numa_node"]
- if (
- interface.numa_node is None
- or interface.numa_node.index != numa_node_idx
- ):
- interface.numa_node_id = numa_ids_map[numa_node_idx]
- update_fields.append("numa_node")
-
sriov_max_vf = iface_details.get("sriov_max_vf")
if interface.sriov_max_vf != sriov_max_vf:
interface.sriov_max_vf = sriov_max_vf
update_fields.append("sriov_max_vf")
-
if update_fields:
interface.save(update_fields=["updated", *update_fields])
@@ -305,12 +295,8 @@ def update_node_network_information(node, data, numa_nodes):
# Duplicate MACs are not expected on machines, raise the
# exception so this can be handled.
raise
-
- numa_ids_map = dict(
- NUMANode.objects.filter(node=node).values_list("index", "id")
- )
-
current_interfaces = set()
+
for mac, iface in interfaces_info.items():
ifname = iface.get("name")
link_connected = iface.get("link_connected")
@@ -327,9 +313,7 @@ def update_node_network_information(node, data, numa_nodes):
if interface.node == node:
# Interface already exists on this node, so just update the NIC
# info
- update_interface_details(
- interface, interfaces_info, numa_ids_map
- )
+ update_interface_details(interface, interfaces_info)
else:
logger.warning(
"Interface with MAC %s moved from node %s to %s. "
diff --git a/src/metadataserver/builtin_scripts/tests/test_hooks.py b/src/metadataserver/builtin_scripts/tests/test_hooks.py
index bb11f5f..bac7d2a 100644
--- a/src/metadataserver/builtin_scripts/tests/test_hooks.py
+++ b/src/metadataserver/builtin_scripts/tests/test_hooks.py
@@ -1675,20 +1675,6 @@ class TestProcessLXDResults(MAASServerTestCase):
self.assertEqual(0, iface3.link_speed)
self.assertEqual(0, iface3.interface_speed)
- def test_updates_interface_numa_node(self):
- node = factory.make_Node()
- iface = factory.make_Interface(
- node=node,
- mac_address="00:00:00:00:00:01",
- )
- create_IPADDR_OUTPUT_NAME_script(node, IP_ADDR_OUTPUT)
-
- lxd_output = make_lxd_output()
- lxd_output["resources"]["network"]["cards"][0]["numa_node"] = 1
- process_lxd_results(node, json.dumps(lxd_output).encode(), 0)
- iface1 = reload_object(iface)
- self.assertEqual(iface1.numa_node.index, 1)
-
def test_ipaddr_script_before(self):
self.assertLess(
IPADDR_OUTPUT_NAME,
diff --git a/src/metadataserver/tests/test_vendor_data.py b/src/metadataserver/tests/test_vendor_data.py
index 509f49a..52ce6ee 100644
--- a/src/metadataserver/tests/test_vendor_data.py
+++ b/src/metadataserver/tests/test_vendor_data.py
@@ -1,4 +1,4 @@
-# Copyright 2016-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2016-2019 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `metadataserver.vendor_data`."""
@@ -296,7 +296,6 @@ class TestGenerateRackControllerConfiguration(MAASServerTestCase):
self.assertThat(config, Contains("libvirt-daemon-system"))
self.assertThat(config, Contains("ForceCommand"))
self.assertThat(config, Contains("libvirt-clients"))
- self.assertThat(config, Not(Contains("qemu-efi-aarch64")))
# Check that a password was saved for the pod-to-be.
virsh_password_meta = NodeMetadata.objects.filter(
node=node, key="virsh_password"
@@ -331,30 +330,6 @@ class TestGenerateRackControllerConfiguration(MAASServerTestCase):
config["runcmd"], Contains(["chmod", "+x", "/etc/rc.local"])
)
self.assertThat(config["runcmd"], Contains(["/etc/rc.local"]))
- self.assertThat(config, Not(Contains("qemu-efi-aarch64")))
-
- def test_yields_configuration_when_arm64_kvm(self):
- node = factory.make_Node(
- status=NODE_STATUS.DEPLOYING,
- osystem="ubuntu",
- netboot=False,
- architecture="arm64/generic",
- )
- node.install_kvm = True
- configuration = get_vendor_data(node, None)
- config = str(dict(configuration))
- self.assertThat(config, Contains("virsh"))
- self.assertThat(config, Contains("ssh_pwauth"))
- self.assertThat(config, Contains("rbash"))
- self.assertThat(config, Contains("libvirt-daemon-system"))
- self.assertThat(config, Contains("ForceCommand"))
- self.assertThat(config, Contains("libvirt-clients"))
- self.assertThat(config, Contains("qemu-efi-aarch64"))
- # Check that a password was saved for the pod-to-be.
- virsh_password_meta = NodeMetadata.objects.filter(
- node=node, key="virsh_password"
- ).first()
- self.assertThat(virsh_password_meta.value, HasLength(32))
class TestGenerateEphemeralNetplanLockRemoval(MAASServerTestCase):
diff --git a/src/metadataserver/vendor_data.py b/src/metadataserver/vendor_data.py
index 4da5c8c..e0b5a42 100644
--- a/src/metadataserver/vendor_data.py
+++ b/src/metadataserver/vendor_data.py
@@ -1,4 +1,4 @@
-# Copyright 2016-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2016-2019 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""vendor-data for cloud-init's use."""
@@ -247,10 +247,6 @@ def generate_kvm_pod_configuration(node):
},
]
packages = ["libvirt-daemon-system", "libvirt-clients"]
- # libvirt emulates UEFI on ARM64 however qemu-efi-aarch64 is only
- # a suggestion on ARM64 so cloud-init doesn't install it.
- if node.split_arch()[0] == "arm64":
- packages.append("qemu-efi-aarch64")
yield "packages", packages
diff --git a/src/provisioningserver/__main__.py b/src/provisioningserver/__main__.py
index 5f22393..6cf81d9 100644
--- a/src/provisioningserver/__main__.py
+++ b/src/provisioningserver/__main__.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright 2012-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2012-2017 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Command-line interface for the MAAS provisioning component."""
@@ -7,6 +7,7 @@
import sys
from provisioningserver import security
+import provisioningserver.boot.install_grub
import provisioningserver.cluster_config_command
import provisioningserver.dns.commands.edit_named_options
import provisioningserver.dns.commands.get_named_conf
@@ -38,6 +39,7 @@ RACK_ONLY_COMMANDS = {
"check-for-shared-secret": security.CheckForSharedSecretScript,
"config": provisioningserver.cluster_config_command,
"install-shared-secret": security.InstallSharedSecretScript,
+ "install-uefi-config": provisioningserver.boot.install_grub,
"register": provisioningserver.register_command,
"support-dump": provisioningserver.support_dump,
"upgrade-cluster": provisioningserver.upgrade_cluster,
diff --git a/src/provisioningserver/boot/__init__.py b/src/provisioningserver/boot/__init__.py
index 5c4f4b6..d4b13e2 100644
--- a/src/provisioningserver/boot/__init__.py
+++ b/src/provisioningserver/boot/__init__.py
@@ -17,7 +17,6 @@ from twisted.internet.defer import inlineCallbacks, returnValue
from zope.interface import implementer
from provisioningserver.boot.tftppath import compose_image_path
-from provisioningserver.config import debug_enabled
from provisioningserver.events import EVENT_TYPES, try_send_rack_event
from provisioningserver.kernel_opts import compose_kernel_command_line
from provisioningserver.logger import get_maas_logger
@@ -419,7 +418,6 @@ class BootMethod(metaclass=ABCMeta):
"kernel_path": kernel_path,
"kernel_name": kernel_name,
"dtb_path": dtb_path,
- "debug": debug_enabled(),
}
return namespace
diff --git a/src/provisioningserver/boot/install_grub.py b/src/provisioningserver/boot/install_grub.py
new file mode 100644
index 0000000..27ebd36
--- /dev/null
+++ b/src/provisioningserver/boot/install_grub.py
@@ -0,0 +1,36 @@
+# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Install a GRUB2 pre-boot loader config for TFTP download."""
+
+
+import os
+
+from provisioningserver.config import ClusterConfiguration
+from provisioningserver.utils.fs import write_text_file
+
+CONFIG_FILE = """
+# MAAS GRUB2 pre-loader configuration file
+
+# Load based on MAC address first.
+configfile /grub/grub.cfg-${net_default_mac}
+
+# Failed to load based on MAC address.
+# Load amd64 by default, UEFI only supported by 64-bit
+configfile /grub/grub.cfg-default-amd64
+"""
+
+
+def add_arguments(parser):
+ pass
+
+
+def run(args):
+ """Install a GRUB2 pre-boot loader config into the TFTP
+ directory structure.
+ """
+ with ClusterConfiguration.open() as config:
+ if not os.path.exists(config.grub_root):
+ os.makedirs(config.grub_root)
+ destination_file = os.path.join(config.grub_root, "grub.cfg")
+ write_text_file(destination_file, CONFIG_FILE)
diff --git a/src/provisioningserver/boot/tests/test_boot.py b/src/provisioningserver/boot/tests/test_boot.py
index f5711ec..7e591ce 100644
--- a/src/provisioningserver/boot/tests/test_boot.py
+++ b/src/provisioningserver/boot/tests/test_boot.py
@@ -1,4 +1,4 @@
-# Copyright 2014-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2014-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `provisioningserver.boot`."""
@@ -30,7 +30,6 @@ from provisioningserver.boot.tftppath import compose_image_path
from provisioningserver.kernel_opts import compose_kernel_command_line
from provisioningserver.rpc import region
from provisioningserver.rpc.testing import MockLiveClusterToRegionRPCFixture
-from provisioningserver.testing.config import ClusterConfigurationFixture
from provisioningserver.tests.test_kernel_opts import make_kernel_parameters
from provisioningserver.utils.fs import atomic_symlink, tempdir
@@ -332,18 +331,6 @@ class TestBootMethod(MAASTestCase):
template_namespace["dtb_path"](kernel_params),
)
- def test_compose_template_namespace_include_debug(self):
- debug = factory.pick_bool()
- boot.debug_enabled.cache_clear()
- self.addClassCleanup(boot.debug_enabled.cache_clear)
- self.useFixture(ClusterConfigurationFixture(debug=debug))
- kernel_params = make_kernel_parameters()
- method = FakeBootMethod()
-
- template_namespace = method.compose_template_namespace(kernel_params)
-
- self.assertEqual(debug, template_namespace["debug"])
-
class TestGetArchiveUrl(MAASTestCase):
diff --git a/src/provisioningserver/boot/tests/test_install_grub.py b/src/provisioningserver/boot/tests/test_install_grub.py
new file mode 100644
index 0000000..6cb876f
--- /dev/null
+++ b/src/provisioningserver/boot/tests/test_install_grub.py
@@ -0,0 +1,29 @@
+# Copyright 2014-2016 Canonical Ltd. This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for the install_grub command."""
+
+
+import os.path
+
+from testtools.matchers import FileExists
+
+from maastesting.factory import factory
+from maastesting.testcase import MAASTestCase
+import provisioningserver.boot.install_grub
+from provisioningserver.testing.config import ClusterConfigurationFixture
+from provisioningserver.utils.script import MainScript
+
+
+class TestInstallGrub(MAASTestCase):
+ def test_integration(self):
+ tftproot = self.make_dir()
+ self.useFixture(ClusterConfigurationFixture(tftp_root=tftproot))
+
+ action = factory.make_name("action")
+ script = MainScript(action)
+ script.register(action, provisioningserver.boot.install_grub)
+ script.execute((action,))
+
+ config_filename = os.path.join("grub", "grub.cfg")
+ self.assertThat(os.path.join(tftproot, config_filename), FileExists())
diff --git a/src/provisioningserver/boot/tests/test_uefi_amd64.py b/src/provisioningserver/boot/tests/test_uefi_amd64.py
index 886dca5..328e7dd 100644
--- a/src/provisioningserver/boot/tests/test_uefi_amd64.py
+++ b/src/provisioningserver/boot/tests/test_uefi_amd64.py
@@ -1,11 +1,10 @@
-# Copyright 2014-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2014-2018 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `provisioningserver.boot.uefi_amd64`."""
import os
-import random
import re
from unittest.mock import sentinel
@@ -20,7 +19,6 @@ from testtools.matchers import (
from maastesting.factory import factory
from maastesting.matchers import FileContains, MockAnyCall, MockCalledOnce
from maastesting.testcase import MAASTestCase
-from provisioningserver import boot
from provisioningserver.boot import BytesReader
from provisioningserver.boot import uefi_amd64 as uefi_amd64_module
from provisioningserver.boot.testing import TFTPPath, TFTPPathAndComponents
@@ -31,7 +29,6 @@ from provisioningserver.boot.uefi_amd64 import (
UEFIAMD64BootMethod,
UEFIAMD64HTTPBootMethod,
)
-from provisioningserver.testing.config import ClusterConfigurationFixture
from provisioningserver.tests.test_kernel_opts import make_kernel_parameters
from provisioningserver.utils import typed
from provisioningserver.utils.fs import tempdir
@@ -71,12 +68,6 @@ class TestUEFIAMD64BootMethodRender(MAASTestCase):
"""Tests for
`provisioningserver.boot_amd64.uefi.UEFIAMD64BootMethod.render`."""
- def setUp(self):
- super().setUp()
- boot.debug_enabled.cache_clear()
- self.addClassCleanup(boot.debug_enabled.cache_clear)
- self.useFixture(ClusterConfigurationFixture(debug=False))
-
def test_get_reader(self):
# Given the right configuration options, the UEFI configuration is
# correctly rendered.
@@ -110,7 +101,7 @@ class TestUEFIAMD64BootMethodRender(MAASTestCase):
re.MULTILINE | re.DOTALL,
),
MatchesRegex(
- r".*^\s+linux %s/%s/%s .+?$"
+ r".*^\s+linuxefi %s/%s/%s .+?$"
% (
re.escape(fs_host),
re.escape(image_dir),
@@ -119,7 +110,7 @@ class TestUEFIAMD64BootMethodRender(MAASTestCase):
re.MULTILINE | re.DOTALL,
),
MatchesRegex(
- r".*^\s+initrd %s/%s/%s$"
+ r".*^\s+initrdefi %s/%s/%s$"
% (
re.escape(fs_host),
re.escape(image_dir),
@@ -135,7 +126,7 @@ class TestUEFIAMD64BootMethodRender(MAASTestCase):
method = UEFIAMD64BootMethod()
options = {
"backend": None,
- "kernel_params": make_kernel_parameters(purpose="xinstall"),
+ "kernel_params": make_kernel_parameters(purpose="install"),
}
# Capture the output before sprinking in some random options.
output_before = method.get_reader(**options).read(10000)
@@ -176,7 +167,7 @@ class TestUEFIAMD64BootMethodRender(MAASTestCase):
output,
ContainsAll(
[
- "menuentry 'Ephemeral'",
+ "menuentry 'Enlist'",
"%s/%s/%s" % (params.osystem, params.arch, params.subarch),
params.kernel,
]
@@ -194,7 +185,7 @@ class TestUEFIAMD64BootMethodRender(MAASTestCase):
output,
ContainsAll(
[
- "menuentry 'Ephemeral'",
+ "menuentry 'Commission'",
"%s/%s/%s" % (params.osystem, params.arch, params.subarch),
params.kernel,
]
@@ -270,8 +261,6 @@ class TestUEFIAMD64BootMethodRegex(MAASTestCase):
)
def test_re_config_file_does_not_match_default_grub_config_file(self):
- # The default grub.cfg is on the filesystem let the normal handler
- # grab it.
self.assertIsNone(re_config_file.match(b"grub/grub.cfg"))
def test_re_config_file_with_default(self):
@@ -304,86 +293,6 @@ class TestUEFIAMD64BootMethodRegex(MAASTestCase):
class TestUEFIAMD64BootMethod(MAASTestCase):
"""Tests `provisioningserver.boot.uefi_amd64.UEFIAMD64BootMethod`."""
- def test_match_path_none(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- self.assertIsNone(
- method.match_path(backend, factory.make_string().encode())
- )
-
- def test_match_path_mac_colon(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- mac = factory.make_mac_address()
- self.assertEqual(
- {"mac": mac.replace(":", "-")},
- method.match_path(backend, f"/grub/grub.cfg-{mac}".encode()),
- )
-
- def test_match_path_mac_dash(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- mac = factory.make_mac_address().replace(":", "-")
- self.assertEqual(
- {"mac": mac},
- method.match_path(backend, f"/grub/grub.cfg-{mac}".encode()),
- )
-
- def test_match_path_arch(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- arch = factory.make_string()
- self.assertEqual(
- {"arch": arch},
- method.match_path(
- backend, f"/grub/grub.cfg-default-{arch}".encode()
- ),
- )
-
- def test_match_path_arch_x86_64(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- self.assertEqual(
- {"arch": "amd64"},
- method.match_path(backend, b"/grub/grub.cfg-default-x86_64"),
- )
-
- def test_match_path_arch_powerpc(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- self.assertEqual(
- {"arch": "ppc64el"},
- method.match_path(backend, b"/grub/grub.cfg-default-powerpc"),
- )
-
- def test_match_path_arch_ppc64(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- self.assertEqual(
- {"arch": "ppc64el"},
- method.match_path(backend, b"/grub/grub.cfg-default-ppc64"),
- )
-
- def test_match_path_arch_ppc64le(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- self.assertEqual(
- {"arch": "ppc64el"},
- method.match_path(backend, b"/grub/grub.cfg-default-ppc64le"),
- )
-
- def test_match_path_arch_subarch(self):
- method = UEFIAMD64BootMethod()
- backend = random.choice(["http", "tftp"])
- arch = factory.make_string()
- subarch = factory.make_string()
- self.assertEqual(
- {"arch": arch, "subarch": subarch},
- method.match_path(
- backend, f"/grub/grub.cfg-default-{arch}-{subarch}".encode()
- ),
- )
-
def test_link_bootloader_creates_grub_cfg(self):
method = UEFIAMD64BootMethod()
with tempdir() as tmp:
diff --git a/src/provisioningserver/boot/uefi_amd64.py b/src/provisioningserver/boot/uefi_amd64.py
index 1399fa0..4e42c78 100644
--- a/src/provisioningserver/boot/uefi_amd64.py
+++ b/src/provisioningserver/boot/uefi_amd64.py
@@ -1,4 +1,4 @@
-# Copyright 2014-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2014-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""UEFI AMD64 Boot Method"""
@@ -24,11 +24,11 @@ CONFIG_FILE = dedent(
# MAAS GRUB2 pre-loader configuration file
# Load based on MAC address first.
- configfile /grub/grub.cfg-${net_default_mac}
+ configfile (pxe)/grub/grub.cfg-${net_default_mac}
- # Failed to load based on MAC address. Load based on the CPU
- # architecture.
- configfile /grub/grub.cfg-default-${grub_cpu}
+ # Failed to load based on MAC address.
+ # Load amd64 by default, UEFI only supported by 64-bit
+ configfile (pxe)/grub/grub.cfg-default-amd64
"""
)
@@ -36,7 +36,7 @@ CONFIG_FILE = dedent(
# format. Required for UEFI as GRUB2 only presents the MAC address
# in colon-seperated format.
re_mac_address_octet = r"[0-9a-f]{2}"
-re_mac_address = re.compile("[:-]".join(repeat(re_mac_address_octet, 6)))
+re_mac_address = re.compile(":".join(repeat(re_mac_address_octet, 6)))
# Match the grub/grub.cfg-* request for UEFI (aka. GRUB2)
re_config_file = r"""
@@ -48,10 +48,10 @@ re_config_file = r"""
(?P<mac>{re_mac_address.pattern}) # Capture UEFI MAC.
| # or "default"
default
- (?: # perhaps with specified arch, with a separator of '-'
+ (?: # perhaps with specified arch, with a separator of '-'
[-](?P<arch>\w+) # arch
(?:-(?P<subarch>\w+))? # optional subarch
- )?
+ )?
)
$
"""
@@ -90,14 +90,6 @@ class UEFIAMD64BootMethod(BootMethod):
if mac is not None:
params["mac"] = mac.replace(":", "-")
- # MAAS uses Debian architectures while GRUB uses standard Linux
- # architectures.
- arch = params.get("arch")
- if arch == "x86_64":
- params["arch"] = "amd64"
- elif arch in {"powerpc", "ppc64", "ppc64le"}:
- params["arch"] = "ppc64el"
-
return params
def get_reader(self, backend, kernel_params, **extra):
@@ -132,9 +124,7 @@ class UEFIAMD64BootMethod(BootMethod):
# UEFI. And so we fix it here, instead of in the common code. See
# also src/provisioningserver/kernel_opts.py.
namespace["kernel_command"] = kernel_command
- return BytesReader(
- template.substitute(namespace).strip().encode("utf-8")
- )
+ return BytesReader(template.substitute(namespace).encode("utf-8"))
def _find_and_copy_bootloaders(self, destination, log_missing=True):
if not super()._find_and_copy_bootloaders(destination, False):
diff --git a/src/provisioningserver/config.py b/src/provisioningserver/config.py
index 49f4da2..5177450 100644
--- a/src/provisioningserver/config.py
+++ b/src/provisioningserver/config.py
@@ -1,4 +1,4 @@
-# Copyright 2012-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2012-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Configuration for the MAAS cluster.
@@ -104,7 +104,6 @@ It can be used like so::
from contextlib import closing, contextmanager
from copy import deepcopy
-from functools import lru_cache
from itertools import islice
import json
import logging
@@ -813,10 +812,3 @@ def is_dev_environment():
return False
else:
return True
-
-
-@lru_cache(1)
-def debug_enabled():
- """Return and cache whether debug has been enabled."""
- with ClusterConfiguration.open() as config:
- return config.debug
diff --git a/src/provisioningserver/dns/tests/test_actions.py b/src/provisioningserver/dns/tests/test_actions.py
index 72c5b5a..3aae795 100644
--- a/src/provisioningserver/dns/tests/test_actions.py
+++ b/src/provisioningserver/dns/tests/test_actions.py
@@ -258,7 +258,7 @@ class TestConfiguration(MAASTestCase):
};
dnssec-validation %s;
- empty-zones-enable no;
+
allow-query { any; };
allow-recursion { trusted; };
allow-query-cache { trusted; };
@@ -267,6 +267,7 @@ class TestConfiguration(MAASTestCase):
expected_options_content %= tuple(upstream_dns) + (
expected_dnssec_validation,
)
+
self.assertThat(
expected_options_file, FileContains(expected_options_content)
)
diff --git a/src/provisioningserver/drivers/pod/tests/test_virsh.py b/src/provisioningserver/drivers/pod/tests/test_virsh.py
index efa35c8..d00e7fc 100644
--- a/src/provisioningserver/drivers/pod/tests/test_virsh.py
+++ b/src/provisioningserver/drivers/pod/tests/test_virsh.py
@@ -522,16 +522,9 @@ class TestVirshSSH(MAASTestCase):
conn.sendline(line)
return conn
- def configure_virshssh(self, results, dom_prefix=None):
- virshssh = virsh.VirshSSH(dom_prefix=dom_prefix)
- mock_run = self.patch(virshssh, "run")
- if isinstance(results, str):
- mock_run.return_value = results
- else:
- # either a single exception or a list of results/errors
- mock_run.side_effect = results
-
- return virshssh
+ def configure_virshssh(self, output, dom_prefix=None):
+ self.patch(virsh.VirshSSH, "run").return_value = output
+ return virsh.VirshSSH(dom_prefix=dom_prefix)
def test_login_prompt(self):
virsh_outputs = ["virsh # "]
@@ -644,26 +637,11 @@ class TestVirshSSH(MAASTestCase):
self.assertThat(mock_prompt, MockCalledOnceWith())
self.assertEqual("\n".join(names), output)
- def test_run_error(self):
- cmd = ["list", "--all", "--name"]
- message = "something failed"
- conn = self.configure_virshssh_pexpect()
- conn.before = "\n".join([" ".join(cmd), f"error: {message}"]).encode(
- "utf-8"
- )
- self.patch(conn, "sendline")
- self.patch(conn, "prompt")
- mock_maaslog = self.patch(virsh, "maaslog")
- error = self.assertRaises(virsh.VirshError, conn.run, cmd)
- expected_message = "Virsh command ['list', '--all', '--name'] failed: something failed"
- self.assertEqual(str(error), expected_message)
- mock_maaslog.error.assert_called_once_with(expected_message)
-
def test_get_column_values(self):
keys = ["Source", "Model"]
expected = (("br0", "e1000"), ("br1", "e1000"))
conn = self.configure_virshssh("")
- values = conn._get_column_values(SAMPLE_IFLIST, keys)
+ values = conn.get_column_values(SAMPLE_IFLIST, keys)
self.assertItemsEqual(values, expected)
def test_get_key_value(self):
@@ -674,10 +652,13 @@ class TestVirshSSH(MAASTestCase):
self.assertEquals(value, expected)
def test_create_storage_pool(self):
- conn = self.configure_virshssh("")
+ mock_run = self.patch(virsh.VirshSSH, "run")
+ mock_run.return_value = ""
+ conn = virsh.VirshSSH()
conn.create_storage_pool()
- conn.run.assert_has_calls(
- [
+ self.assertThat(
+ mock_run,
+ MockCallsMatch(
call(
[
"pool-define-as",
@@ -690,7 +671,33 @@ class TestVirshSSH(MAASTestCase):
call(["pool-build", "maas"]),
call(["pool-start", "maas"]),
call(["pool-autostart", "maas"]),
- ],
+ ),
+ )
+
+ def test_create_storage_pool_writes_maaslog_on_error(self):
+ mock_maaslog = self.patch(virsh, "maaslog")
+ mock_run = self.patch(virsh.VirshSSH, "run")
+ error_msg = "error: error message here"
+ mock_run.return_value = error_msg
+ conn = virsh.VirshSSH()
+ conn.create_storage_pool()
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(
+ [
+ "pool-define-as",
+ "maas",
+ "dir",
+ "- - - -",
+ "/var/lib/libvirt/maas-images",
+ ]
+ ),
+ )
+ self.assertThat(
+ mock_maaslog.error,
+ MockCalledOnceWith(
+ "Failed to create Pod storage pool: %s", error_msg
+ ),
)
def test_list_machines(self):
@@ -730,7 +737,7 @@ class TestVirshSSH(MAASTestCase):
self.assertEqual(state, expected)
def test_get_machine_state_error(self):
- conn = self.configure_virshssh(virsh.VirshError("some error"))
+ conn = self.configure_virshssh("error:")
expected = conn.get_machine_state("")
self.assertEqual(None, expected)
@@ -748,7 +755,7 @@ class TestVirshSSH(MAASTestCase):
)
def test_get_machine_interface_info_error(self):
- conn = self.configure_virshssh(virsh.VirshError("some error"))
+ conn = self.configure_virshssh("error:")
expected = conn.get_machine_state("")
self.assertEqual(None, expected)
@@ -1229,23 +1236,30 @@ class TestVirshSSH(MAASTestCase):
def test_check_machine_can_startup(self):
machine = factory.make_name("machine")
conn = self.configure_virshssh("")
+ mock_run = self.patch(virsh.VirshSSH, "run")
+ mock_run.side_effect = ("", "")
conn.check_machine_can_startup(machine)
- conn.run.assert_has_calls(
- [
+ self.assertThat(
+ mock_run,
+ MockCallsMatch(
call(["start", "--paused", machine]),
call(["destroy", machine]),
- ]
+ ),
)
def test_check_machine_can_startup_raises_exception(self):
machine = factory.make_name("machine")
- conn = self.configure_virshssh([virsh.VirshError("some error"), ""])
+ conn = self.configure_virshssh("")
+ mock_run = self.patch(virsh.VirshSSH, "run")
+ mock_run.side_effect = ("error: some error", "")
mock_delete_domain = self.patch(virsh.VirshSSH, "delete_domain")
self.assertRaises(
virsh.VirshError, conn.check_machine_can_startup, machine
)
- mock_delete_domain.assert_called_once_with(machine)
- conn.run.assert_called_once_with(["start", "--paused", machine])
+ self.assertThat(mock_delete_domain, MockCalledOnceWith(machine))
+ self.assertThat(
+ mock_run, MockCalledOnceWith(["start", "--paused", machine])
+ )
def test_set_machine_autostart(self):
conn = self.configure_virshssh("")
@@ -1253,7 +1267,7 @@ class TestVirshSSH(MAASTestCase):
self.assertEqual(True, expected)
def test_set_machine_autostart_error(self):
- conn = self.configure_virshssh(virsh.VirshError("some error"))
+ conn = self.configure_virshssh("error:")
expected = conn.poweron(factory.make_name("machine"))
self.assertEqual(False, expected)
@@ -1296,7 +1310,7 @@ class TestVirshSSH(MAASTestCase):
self.assertEqual(True, expected)
def test_poweron_error(self):
- conn = self.configure_virshssh(virsh.VirshError("some error"))
+ conn = self.configure_virshssh("error:")
expected = conn.poweron(factory.make_name("machine"))
self.assertEqual(False, expected)
@@ -1306,7 +1320,7 @@ class TestVirshSSH(MAASTestCase):
self.assertEqual(True, expected)
def test_poweroff_error(self):
- conn = self.configure_virshssh(virsh.VirshError("some error"))
+ conn = self.configure_virshssh("error:")
expected = conn.poweroff(factory.make_name("machine"))
self.assertEqual(False, expected)
@@ -1504,11 +1518,11 @@ class TestVirshSSH(MAASTestCase):
self.assertIsNone(conn.create_local_volume(random.randint(1000, 2000)))
def test_create_local_volume_returns_tagged_pool_and_volume(self):
+ conn = self.configure_virshssh("")
tagged_pools = ["pool1", "pool2"]
- conn = self.configure_virshssh(
- (SAMPLE_POOLINFO_FULL, SAMPLE_POOLINFO, None)
- )
- self.patch(conn, "list_pools").return_value = tagged_pools
+ self.patch(virsh.VirshSSH, "list_pools").return_value = tagged_pools
+ mock_run = self.patch(virsh.VirshSSH, "run")
+ mock_run.side_effect = (SAMPLE_POOLINFO_FULL, SAMPLE_POOLINFO, None)
disk = RequestedMachineBlockDevice(size=4096, tags=tagged_pools)
used_pool, _ = conn.create_local_volume(disk)
self.assertEqual(tagged_pools[1], used_pool)
@@ -1521,21 +1535,25 @@ class TestVirshSSH(MAASTestCase):
pool_type,
pool,
)
+ mock_run = self.patch(virsh.VirshSSH, "run")
disk = RequestedMachineBlockDevice(
size=random.randint(1000, 2000), tags=[]
)
used_pool, volume_name = conn.create_local_volume(disk)
- conn.run.assert_called_once_with(
- [
- "vol-create-as",
- used_pool,
- volume_name,
- str(disk.size),
- "--allocation",
- "0",
- "--format",
- "raw",
- ]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(
+ [
+ "vol-create-as",
+ used_pool,
+ volume_name,
+ str(disk.size),
+ "--allocation",
+ "0",
+ "--format",
+ "raw",
+ ]
+ ),
)
self.assertEqual(pool, used_pool)
self.assertIsNotNone(volume_name)
@@ -1547,19 +1565,23 @@ class TestVirshSSH(MAASTestCase):
"logical",
pool,
)
+ mock_run = self.patch(virsh.VirshSSH, "run")
disk = RequestedMachineBlockDevice(
size=random.randint(1000, 2000), tags=[]
)
used_pool, volume_name = conn.create_local_volume(disk)
- conn.run.assert_called_once_with(
- [
- "vol-create-as",
- used_pool,
- volume_name,
- str(disk.size),
- "--format",
- "raw",
- ]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(
+ [
+ "vol-create-as",
+ used_pool,
+ volume_name,
+ str(disk.size),
+ "--format",
+ "raw",
+ ]
+ ),
)
self.assertEqual(pool, used_pool)
self.assertIsNotNone(volume_name)
@@ -1571,22 +1593,26 @@ class TestVirshSSH(MAASTestCase):
"zfs",
pool,
)
+ mock_run = self.patch(virsh.VirshSSH, "run")
disk = RequestedMachineBlockDevice(
size=random.randint(1000, 2000), tags=[]
)
used_pool, volume_name = conn.create_local_volume(disk)
size = int(floor(disk.size / 2 ** 20)) * 2 ** 20
- conn.run.assert_called_once_with(
- [
- "vol-create-as",
- used_pool,
- volume_name,
- str(size),
- "--allocation",
- "0",
- "--format",
- "raw",
- ]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(
+ [
+ "vol-create-as",
+ used_pool,
+ volume_name,
+ str(size),
+ "--allocation",
+ "0",
+ "--format",
+ "raw",
+ ]
+ ),
)
self.assertEqual(pool, used_pool)
self.assertIsNotNone(volume_name)
@@ -1595,19 +1621,24 @@ class TestVirshSSH(MAASTestCase):
conn = self.configure_virshssh("")
pool = factory.make_name("pool")
volume_name = factory.make_name("volume")
+ mock_run = self.patch(virsh.VirshSSH, "run")
conn.delete_local_volume(pool, volume_name)
- conn.run.assert_called_once_with(
- ["vol-delete", volume_name, "--pool", pool]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(["vol-delete", volume_name, "--pool", pool]),
)
def test_get_volume_path(self):
+ conn = self.configure_virshssh("")
pool = factory.make_name("pool")
volume_name = factory.make_name("volume")
volume_path = factory.make_name("path")
- conn = self.configure_virshssh(volume_path)
+ mock_run = self.patch(virsh.VirshSSH, "run")
+ mock_run.return_value = " %s " % volume_path
self.assertEqual(volume_path, conn.get_volume_path(pool, volume_name))
- conn.run.assert_called_once_with(
- ["vol-path", volume_name, "--pool", pool]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(["vol-path", volume_name, "--pool", pool]),
)
def test_attach_local_volume(self):
@@ -1618,24 +1649,28 @@ class TestVirshSSH(MAASTestCase):
volume_path = factory.make_name("/some/path/to_vol_serial")
serial = os.path.basename(volume_path)
device_name = factory.make_name("device")
+ mock_run = self.patch(virsh.VirshSSH, "run")
self.patch(
virsh.VirshSSH, "get_volume_path"
).return_value = volume_path
conn.attach_local_volume(domain, pool, volume_name, device_name)
- conn.run.assert_called_once_with(
- [
- "attach-disk",
- domain,
- volume_path,
- device_name,
- "--targetbus",
- "virtio",
- "--sourcetype",
- "file",
- "--config",
- "--serial",
- serial,
- ]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(
+ [
+ "attach-disk",
+ domain,
+ volume_path,
+ device_name,
+ "--targetbus",
+ "virtio",
+ "--sourcetype",
+ "file",
+ "--config",
+ "--serial",
+ serial,
+ ]
+ ),
)
def test_get_networks_list(self):
@@ -1820,22 +1855,26 @@ class TestVirshSSH(MAASTestCase):
network,
InterfaceAttachType.NETWORK,
)
+ mock_run = self.patch(virsh.VirshSSH, "run")
fake_mac = factory.make_mac_address()
interface = RequestedMachineInterface()
self.patch(virsh, "generate_mac_address").return_value = fake_mac
conn.attach_interface(request, interface, domain)
- conn.run.assert_called_once_with(
- [
- "attach-interface",
- domain,
- "network",
- network,
- "--mac",
- fake_mac,
- "--model",
- "virtio",
- "--config",
- ]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(
+ [
+ "attach-interface",
+ domain,
+ "network",
+ network,
+ "--mac",
+ fake_mac,
+ "--model",
+ "virtio",
+ "--config",
+ ]
+ ),
)
def test_attach_interface_calls_attaches_network(self):
@@ -1851,30 +1890,35 @@ class TestVirshSSH(MAASTestCase):
network,
InterfaceAttachType.NETWORK,
)
+ mock_run = self.patch(virsh.VirshSSH, "run")
fake_mac = factory.make_mac_address()
interface = RequestedMachineInterface(
attach_name=network, attach_type="network"
)
self.patch(virsh, "generate_mac_address").return_value = fake_mac
conn.attach_interface(request, interface, domain)
- conn.run.assert_called_once_with(
- [
- "attach-interface",
- domain,
- "network",
- network,
- "--mac",
- fake_mac,
- "--model",
- "virtio",
- "--config",
- ]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(
+ [
+ "attach-interface",
+ domain,
+ "network",
+ network,
+ "--mac",
+ fake_mac,
+ "--model",
+ "virtio",
+ "--config",
+ ]
+ ),
)
def test_attach_interface_attaches_macvlan(self):
conn = self.configure_virshssh("")
request = make_requested_machine()
domain = factory.make_name("domain")
+ mock_run = self.patch(virsh.VirshSSH, "run")
fake_mac = factory.make_mac_address()
interface = RequestedMachineInterface(
attach_name=factory.make_name("name"),
@@ -1893,24 +1937,31 @@ class TestVirshSSH(MAASTestCase):
"attach_name": interface.attach_name,
"attach_options": interface.attach_options,
}
- conn.run.assert_called_once_with(
- ["attach-device", domain, ANY, "--config"]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(["attach-device", domain, ANY, "--config"]),
)
- tmpfile.write.assert_has_calls(
- [
+ self.assertThat(NamedTemporaryFile, MockCalledOnceWith())
+ self.assertThat(tmpfile.__enter__, MockCalledOnceWith())
+ self.assertThat(
+ tmpfile.write,
+ MockCallsMatch(
call(
DOM_TEMPLATE_MACVLAN_INTERFACE.format(
**device_params
).encode("utf-8")
),
call(b"\n"),
- ]
+ ),
)
+ self.assertThat(tmpfile.flush, MockCalledOnceWith())
+ self.assertThat(tmpfile.__exit__, MockCalledOnceWith(None, None, None))
def test_attach_interface_attaches_bridge(self):
conn = self.configure_virshssh("")
request = make_requested_machine()
domain = factory.make_name("domain")
+ mock_run = self.patch(virsh.VirshSSH, "run")
fake_mac = factory.make_mac_address()
interface = RequestedMachineInterface(
attach_name=factory.make_name("ifname"),
@@ -1928,19 +1979,25 @@ class TestVirshSSH(MAASTestCase):
"mac_address": fake_mac,
"attach_name": interface.attach_name,
}
- conn.run.assert_called_once_with(
- ["attach-device", domain, ANY, "--config"]
+ self.assertThat(
+ mock_run,
+ MockCalledOnceWith(["attach-device", domain, ANY, "--config"]),
)
- tmpfile.write.assert_has_calls(
- [
+ self.assertThat(NamedTemporaryFile, MockCalledOnceWith())
+ self.assertThat(tmpfile.__enter__, MockCalledOnceWith())
+ self.assertThat(
+ tmpfile.write,
+ MockCallsMatch(
call(
DOM_TEMPLATE_BRIDGE_INTERFACE.format(
**device_params
).encode("utf-8")
),
call(b"\n"),
- ]
+ ),
)
+ self.assertThat(tmpfile.flush, MockCalledOnceWith())
+ self.assertThat(tmpfile.__exit__, MockCalledOnceWith(None, None, None))
def test_get_domain_capabilities_for_kvm(self):
conn = self.configure_virshssh(SAMPLE_CAPABILITY_KVM)
@@ -1950,19 +2007,18 @@ class TestVirshSSH(MAASTestCase):
)
def test_get_domain_capabilities_for_qemu(self):
- conn = self.configure_virshssh(
- (
- virsh.VirshError("message for virsh"),
- SAMPLE_CAPABILITY_QEMU,
- )
- )
+ conn = self.configure_virshssh("")
+ self.patch(virsh.VirshSSH, "run").side_effect = [
+ "error: message from virsh",
+ SAMPLE_CAPABILITY_QEMU,
+ ]
self.assertEqual(
{"type": "qemu", "emulator": "/usr/bin/qemu-system-x86_64"},
conn.get_domain_capabilities(),
)
def test_get_domain_capabilities_raises_error(self):
- conn = self.configure_virshssh(virsh.VirshError("some error"))
+ conn = self.configure_virshssh("error: some error")
self.assertRaises(virsh.VirshError, conn.get_domain_capabilities)
def test_cleanup_disks_deletes_all(self):
@@ -2065,6 +2121,7 @@ class TestVirshSSH(MAASTestCase):
tmpfile = NamedTemporaryFile.return_value
tmpfile.__enter__.return_value = tmpfile
tmpfile.name = factory.make_name("filename")
+ mock_run = self.patch(virsh.VirshSSH, "run")
mock_attach_disk = self.patch(virsh.VirshSSH, "attach_local_volume")
mock_attach_nic = self.patch(virsh.VirshSSH, "attach_interface")
mock_check_machine_can_startup = self.patch(
@@ -2078,15 +2135,20 @@ class TestVirshSSH(MAASTestCase):
mock_discovered.return_value = sentinel.discovered
observed = conn.create_domain(request)
- tmpfile.write.assert_has_calls(
- [
+ self.assertThat(NamedTemporaryFile, MockCalledOnceWith())
+ self.assertThat(tmpfile.__enter__, MockCalledOnceWith())
+ self.assertThat(
+ tmpfile.write,
+ MockCallsMatch(
call(
DOM_TEMPLATE_AMD64.format(**domain_params).encode("utf-8")
),
call(b"\n"),
- ]
+ ),
)
- conn.run.assert_called_once_with(["define", ANY])
+ self.assertThat(tmpfile.flush, MockCalledOnceWith())
+ self.assertThat(tmpfile.__exit__, MockCalledOnceWith(None, None, None))
+ self.assertThat(mock_run, MockCalledOnceWith(["define", ANY]))
self.assertThat(
mock_attach_disk,
MockCalledOnceWith(ANY, disk_info[0], disk_info[1], "vda"),
@@ -2137,6 +2199,7 @@ class TestVirshSSH(MAASTestCase):
tmpfile = NamedTemporaryFile.return_value
tmpfile.__enter__.return_value = tmpfile
tmpfile.name = factory.make_name("filename")
+ mock_run = self.patch(virsh.VirshSSH, "run")
mock_attach_disk = self.patch(virsh.VirshSSH, "attach_local_volume")
mock_attach_nic = self.patch(virsh.VirshSSH, "attach_interface")
mock_check_machine_can_startup = self.patch(
@@ -2150,26 +2213,39 @@ class TestVirshSSH(MAASTestCase):
mock_discovered.return_value = sentinel.discovered
observed = conn.create_domain(request)
- tmpfile.write.assert_has_calls(
- [
+ self.assertThat(NamedTemporaryFile, MockCalledOnceWith())
+ self.assertThat(tmpfile.__enter__, MockCalledOnceWith())
+ self.assertThat(
+ tmpfile.write,
+ MockCallsMatch(
call(
DOM_TEMPLATE_ARM64.format(**domain_params).encode("utf-8")
),
call(b"\n"),
- ]
+ ),
+ )
+ self.assertThat(tmpfile.flush, MockCalledOnceWith())
+ self.assertThat(tmpfile.__exit__, MockCalledOnceWith(None, None, None))
+ self.assertThat(mock_run, MockCalledOnceWith(["define", ANY]))
+ self.assertThat(
+ mock_attach_disk,
+ MockCalledOnceWith(ANY, disk_info[0], disk_info[1], "vda"),
)
- conn.run.asert_called_once_with(["define", ANY])
- mock_attach_disk.assert_called_once_with(
- ANY, disk_info[0], disk_info[1], "vda"
+ self.assertThat(mock_attach_nic, MockCalledOnceWith(request, ANY, ANY))
+ self.assertThat(
+ mock_check_machine_can_startup,
+ MockCalledOnceWith(request.hostname),
+ )
+ self.assertThat(
+ mock_set_machine_autostart, MockCalledOnceWith(request.hostname)
+ )
+ self.assertThat(
+ mock_configure_pxe, MockCalledOnceWith(request.hostname)
)
- mock_attach_nic.assert_called_once_with(request, ANY, ANY)
- mock_check_machine_can_startup.assert_called_once_with(
- request.hostname
+ self.assertThat(
+ mock_discovered, MockCalledOnceWith(ANY, request=request)
)
- mock_set_machine_autostart.assert_called_once_with(request.hostname)
- mock_configure_pxe.assert_called_once_with(request.hostname)
- mock_discovered.assert_called_once_with(ANY, request=request)
- self.assertEqual(sentinel.discovered, observed)
+ self.assertEqual(sentinel.discovered, observed)
def test_create_domain_calls_correct_methods_with_ppc64_arch(self):
conn = self.configure_virshssh("")
@@ -2201,6 +2277,7 @@ class TestVirshSSH(MAASTestCase):
tmpfile = NamedTemporaryFile.return_value
tmpfile.__enter__.return_value = tmpfile
tmpfile.name = factory.make_name("filename")
+ mock_run = self.patch(virsh.VirshSSH, "run")
mock_attach_disk = self.patch(virsh.VirshSSH, "attach_local_volume")
mock_attach_nic = self.patch(virsh.VirshSSH, "attach_interface")
mock_check_machine_can_startup = self.patch(
@@ -2214,26 +2291,39 @@ class TestVirshSSH(MAASTestCase):
mock_discovered.return_value = sentinel.discovered
observed = conn.create_domain(request)
- tmpfile.write.assert_has_calls(
- [
+ self.assertThat(NamedTemporaryFile, MockCalledOnceWith())
+ self.assertThat(tmpfile.__enter__, MockCalledOnceWith())
+ self.assertThat(
+ tmpfile.write,
+ MockCallsMatch(
call(
DOM_TEMPLATE_PPC64.format(**domain_params).encode("utf-8")
),
call(b"\n"),
- ],
+ ),
+ )
+ self.assertThat(tmpfile.flush, MockCalledOnceWith())
+ self.assertThat(tmpfile.__exit__, MockCalledOnceWith(None, None, None))
+ self.assertThat(mock_run, MockCalledOnceWith(["define", ANY]))
+ self.assertThat(
+ mock_attach_disk,
+ MockCalledOnceWith(ANY, disk_info[0], disk_info[1], "vda"),
+ )
+ self.assertThat(mock_attach_nic, MockCalledOnceWith(request, ANY, ANY))
+ self.assertThat(
+ mock_check_machine_can_startup,
+ MockCalledOnceWith(request.hostname),
+ )
+ self.assertThat(
+ mock_set_machine_autostart, MockCalledOnceWith(request.hostname)
)
- conn.run.assert_called_once_with(["define", ANY])
- mock_attach_disk.assert_called_once_with(
- ANY, disk_info[0], disk_info[1], "vda"
+ self.assertThat(
+ mock_configure_pxe, MockCalledOnceWith(request.hostname)
)
- mock_attach_nic.assert_called_once_with(request, ANY, ANY)
- mock_check_machine_can_startup.assert_called_once_with(
- request.hostname
+ self.assertThat(
+ mock_discovered, MockCalledOnceWith(ANY, request=request)
)
- mock_set_machine_autostart.assert_called_once_with(request.hostname)
- mock_configure_pxe.assert_called_once_with(request.hostname)
- mock_discovered.assert_called_once_with(ANY, request=request)
- self.assertEqual(sentinel.discovered, observed)
+ self.assertEqual(sentinel.discovered, observed)
def test_create_domain_calls_correct_methods_with_s390x_arch(self):
conn = self.configure_virshssh("")
@@ -2265,6 +2355,7 @@ class TestVirshSSH(MAASTestCase):
tmpfile = NamedTemporaryFile.return_value
tmpfile.__enter__.return_value = tmpfile
tmpfile.name = factory.make_name("filename")
+ mock_run = self.patch(virsh.VirshSSH, "run")
mock_attach_disk = self.patch(virsh.VirshSSH, "attach_local_volume")
mock_attach_nic = self.patch(virsh.VirshSSH, "attach_interface")
mock_check_machine_can_startup = self.patch(
@@ -2278,34 +2369,49 @@ class TestVirshSSH(MAASTestCase):
mock_discovered.return_value = sentinel.discovered
observed = conn.create_domain(request)
- tmpfile.write.assert_has_calls(
- [
+ self.assertThat(NamedTemporaryFile, MockCalledOnceWith())
+ self.assertThat(tmpfile.__enter__, MockCalledOnceWith())
+ self.assertThat(
+ tmpfile.write,
+ MockCallsMatch(
call(
DOM_TEMPLATE_S390X.format(**domain_params).encode("utf-8")
),
call(b"\n"),
- ]
+ ),
)
- conn.run.assert_called_once_with(["define", ANY])
- mock_attach_disk.assert_called_once_with(
- ANY, disk_info[0], disk_info[1], "vda"
+ self.assertThat(tmpfile.flush, MockCalledOnceWith())
+ self.assertThat(tmpfile.__exit__, MockCalledOnceWith(None, None, None))
+ self.assertThat(mock_run, MockCalledOnceWith(["define", ANY]))
+ self.assertThat(
+ mock_attach_disk,
+ MockCalledOnceWith(ANY, disk_info[0], disk_info[1], "vda"),
+ )
+ self.assertThat(mock_attach_nic, MockCalledOnceWith(request, ANY, ANY))
+ self.assertThat(
+ mock_check_machine_can_startup,
+ MockCalledOnceWith(request.hostname),
+ )
+ self.assertThat(
+ mock_set_machine_autostart, MockCalledOnceWith(request.hostname)
)
- mock_attach_nic.assert_called_once_with(request, ANY, ANY)
- mock_check_machine_can_startup.assert_called_once_with(
- request.hostname
+ self.assertThat(
+ mock_configure_pxe, MockCalledOnceWith(request.hostname)
)
- mock_set_machine_autostart.assert_called_once_with(request.hostname)
- mock_configure_pxe.assert_called_once_with(request.hostname)
- mock_discovered.assert_called_once_with(ANY, request=request)
- self.assertEqual(sentinel.discovered, observed)
+ self.assertThat(
+ mock_discovered, MockCalledOnceWith(ANY, request=request)
+ )
+ self.assertEqual(sentinel.discovered, observed)
def test_delete_domain_calls_correct_methods(self):
conn = self.configure_virshssh("")
+ mock_run = self.patch(virsh.VirshSSH, "run")
domain = factory.make_name("vm")
conn.delete_domain(domain)
- conn.run.assert_has_calls(
- [
- call(["destroy", domain], raise_error=False),
+ self.assertThat(
+ mock_run,
+ MockCallsMatch(
+ call(["destroy", domain]),
call(
[
"undefine",
@@ -2313,10 +2419,9 @@ class TestVirshSSH(MAASTestCase):
"--remove-all-storage",
"--managed-save",
"--nvram",
- ],
- raise_error=False,
+ ]
),
- ]
+ ),
)
diff --git a/src/provisioningserver/drivers/pod/virsh.py b/src/provisioningserver/drivers/pod/virsh.py
index 3a19839..3b86bb9 100644
--- a/src/provisioningserver/drivers/pod/virsh.py
+++ b/src/provisioningserver/drivers/pod/virsh.py
@@ -4,12 +4,12 @@
"""Virsh pod driver."""
+from collections import namedtuple
from math import floor
import os
import string
from tempfile import NamedTemporaryFile
from textwrap import dedent
-from typing import NamedTuple
from urllib.parse import urlparse
from uuid import uuid4
@@ -239,13 +239,7 @@ DOM_TEMPLATE_S390X = dedent(
"""
)
-
-class InterfaceInfo(NamedTuple):
- type: str
- source: str
- model: str
- mac: str
-
+InterfaceInfo = namedtuple("InterfaceInfo", ("type", "source", "model", "mac"))
REQUIRED_PACKAGES = [
["virsh", "libvirt-clients"],
@@ -316,13 +310,15 @@ class VirshSSH(pexpect.spawn):
def _execute(self, poweraddr):
"""Spawns the pexpect command."""
- self._spawn(f"virsh --connect {poweraddr}")
+ cmd = "virsh --connect %s" % poweraddr
+ self._spawn(cmd)
def get_network_xml(self, network):
- try:
- return self.run(["net-dumpxml", network])
- except VirshError:
+ output = self.run(["net-dumpxml", network]).strip()
+ if output.startswith("error:"):
+ maaslog.error("%s: Failed to get XML for network", network)
return None
+ return output
def get_machine_xml(self, machine):
# Check if we have a cached version of the XML.
@@ -332,9 +328,9 @@ class VirshSSH(pexpect.spawn):
return self.xml[machine]
# Grab the XML from virsh if we don't have it already.
- try:
- output = self.run(["dumpxml", machine])
- except VirshError:
+ output = self.run(["dumpxml", machine]).strip()
+ if output.startswith("error:"):
+ maaslog.error("%s: Failed to get XML for machine", machine)
return None
# Cache the XML, since we'll need it later to reconfigure the VM.
@@ -393,31 +389,35 @@ class VirshSSH(pexpect.spawn):
return False
return True
- def run(self, args, raise_error=True):
+ def run(self, args):
cmd = " ".join(args)
self.sendline(cmd)
self.prompt()
- output = self.before.decode("utf-8").strip()
- # remove the first line since it containes the issued command
- output = "\n".join(output.splitlines()[1:])
- if output.startswith("error:"):
- message = f"Virsh command {args} failed: {output[7:]}"
- maaslog.error(message)
- if raise_error:
- raise VirshError(message)
- return "" # return empty output if something failed
- return output
+ result = self.before.decode("utf-8").splitlines()
+ return "\n".join(result[1:])
- def _get_column_values(self, data, keys):
+ def get_column_values(self, data, keys):
"""Return tuple of column value tuples based off keys."""
- entries = [row.split() for row in data.strip().splitlines()]
- columns = entries[0]
- indexes = [columns.index(key) for key in keys]
- # skip header lines
- entries = entries[2:]
- return tuple(
- tuple(entry[index] for index in indexes) for entry in entries
- )
+ data = data.strip().splitlines()
+ cols = data[0].split()
+ indexes = []
+ # Look for column headers matching keys.
+ for k in keys:
+ try:
+ indexes.append(cols.index(k))
+ except ValueError:
+ # key was not found, continue searching.
+ continue
+ col_values = []
+ if len(indexes) > 0:
+ # Iterate over data and return column key values.
+ # Skip first two header lines.
+ for line in data[2:]:
+ line_values = []
+ for index in indexes:
+ line_values.append(line.split()[index])
+ col_values.append(tuple(line_values))
+ return tuple(col_values)
def get_key_value(self, data, key):
"""Return value based off of key."""
@@ -448,49 +448,57 @@ class VirshSSH(pexpect.spawn):
["pool-autostart", "maas"],
]
for command in commands:
- try:
- self.run(command)
- except VirshError:
+ output = self.run(command)
+ if output.startswith("error:"):
+ maaslog.error("Failed to create Pod storage pool: %s", output)
return None
def list_machines(self):
"""Lists all VMs by name."""
- machines = self.run(["list", "--all", "--name"]).splitlines()
+ machines = self.run(["list", "--all", "--name"])
+ machines = machines.strip().splitlines()
return [m for m in machines if m.startswith(self.dom_prefix)]
def list_pools(self):
"""Lists all pools in the pod."""
+ keys = ["Name"]
output = self.run(["pool-list"])
- pools = self._get_column_values(output, ["Name"])
+ pools = self.get_column_values(output, keys)
return [p[0] for p in pools]
def list_machine_block_devices(self, machine):
"""Lists all devices for VM."""
+ keys = ["Device", "Target", "Source"]
output = self.run(["domblklist", machine, "--details"])
- devices = self._get_column_values(
- output, ["Device", "Target", "Source"]
- )
+ devices = self.get_column_values(output, keys)
return [(d[1], d[2]) for d in devices if d[0] == "disk"]
def get_machine_state(self, machine):
"""Gets the VM state."""
- try:
- return self.run(["domstate", machine])
- except VirshError:
+ state = self.run(["domstate", machine]).strip()
+ if state.startswith("error:"):
return None
+ return state
def get_machine_interface_info(self, machine):
"""Gets list of mac addressess assigned to the VM."""
- try:
- output = self.run(["domiflist", machine])
- except VirshError:
+ output = self.run(["domiflist", machine]).strip()
+ if output.startswith("error:"):
+ maaslog.error("%s: Failed to get node MAC addresses", machine)
return None
- return [
- InterfaceInfo(*entry)
- for entry in self._get_column_values(
- output, ["Type", "Source", "Model", "MAC"]
- )
- ]
+ # Parse the `virsh domiflist <machine>` output, which will look
+ # something like the following:
+ #
+ # Interface Type Source Model MAC
+ # -------------------------------------------------------
+ # - network default virtio 52:54:00:5b:86:86
+ # - bridge br0 virtio 52:54:00:8f:39:13
+ # - bridge br1 virtio 52:54:00:72:f9:82
+ #
+ # That is, skip the two lines of header, and then extract the type,
+ # source, model, and MAC.
+ output = output.splitlines()[2:]
+ return [InterfaceInfo(*line.split()[1:5]) for line in output]
def get_pod_cpu_count(self, nodeinfo):
"""Gets number of CPUs in the pod."""
@@ -502,7 +510,7 @@ class VirshSSH(pexpect.spawn):
def get_machine_cpu_count(self, machine):
"""Gets the VM CPU count."""
- output = self.run(["dominfo", machine])
+ output = self.run(["dominfo", machine]).strip()
cpu_count = self.get_key_value(output, "CPU(s)")
if cpu_count is None:
maaslog.error("%s: Failed to get machine CPU count", machine)
@@ -528,7 +536,7 @@ class VirshSSH(pexpect.spawn):
def get_machine_memory(self, machine):
"""Gets the VM memory."""
- output = self.run(["dominfo", machine])
+ output = self.run(["dominfo", machine]).strip()
KiB = self.get_key_value_unitless(output, "Max memory")
if KiB is None:
maaslog.error("%s: Failed to get machine memory", machine)
@@ -540,8 +548,8 @@ class VirshSSH(pexpect.spawn):
"""Get the storage pools information."""
pools = []
for pool in self.list_pools():
- output = self.run(["pool-dumpxml", pool])
- if not output:
+ output = self.run(["pool-dumpxml", pool]).strip()
+ if output is None:
# Skip if cannot get more information.
continue
@@ -589,8 +597,8 @@ class VirshSSH(pexpect.spawn):
def get_machine_local_storage(self, machine, device):
"""Gets the VM local storage for device."""
- output = self.run(["domblkinfo", machine, device])
- if not output:
+ output = self.run(["domblkinfo", machine, device]).strip()
+ if output is None:
maaslog.error("Failed to get available pod local storage")
return None
try:
@@ -600,7 +608,7 @@ class VirshSSH(pexpect.spawn):
def get_pod_nodeinfo(self):
"""Gets the general information of the node via 'nodeinfo'"""
- return self.run(["nodeinfo"])
+ return self.run(["nodeinfo"]).strip()
def get_pod_arch(self, nodeinfo):
"""Gets architecture of the pod."""
@@ -769,22 +777,21 @@ class VirshSSH(pexpect.spawn):
If no error is reported, destroy the domain to put it back into a
'shut off' state.
"""
- try:
- self.run(["start", "--paused", machine])
- except VirshError as e:
+ output = self.run(["start", "--paused", machine]).strip()
+ if output.startswith("error:"):
# Delete the domain.
self.delete_domain(machine)
# Raise the error.
- raise VirshError(f"Unable to compose {machine}: {e}")
+ raise VirshError("Unable to compose %s: %s" % (machine, output))
else:
# No errors, so set machine back to 'shut off' state.
self.run(["destroy", machine])
def set_machine_autostart(self, machine):
"""Set machine to autostart."""
- try:
- self.run(["autostart", machine])
- except VirshError:
+ output = self.run(["autostart", machine]).strip()
+ if output.startswith("error:"):
+ maaslog.error("%s: Failed to set autostart", machine)
return False
return True
@@ -824,25 +831,24 @@ class VirshSSH(pexpect.spawn):
f.write(etree.tostring(doc))
f.write(b"\n")
f.flush()
- try:
- self.run(["define", f.name])
- except VirshError:
+ output = self.run(["define", f.name])
+ if output.startswith("error:"):
+ maaslog.error("%s: Failed to set network boot order", machine)
return False
+ maaslog.info("%s: Successfully set network boot order", machine)
return True
def poweron(self, machine):
"""Poweron a VM."""
- try:
- self.run(["start", machine])
- except VirshError:
+ output = self.run(["start", machine]).strip()
+ if output.startswith("error:"):
return False
return True
def poweroff(self, machine):
"""Poweroff a VM."""
- try:
- self.run(["destroy", machine])
- except VirshError:
+ output = self.run(["destroy", machine]).strip()
+ if output.startswith("error:"):
return False
return True
@@ -942,7 +948,8 @@ class VirshSSH(pexpect.spawn):
def get_volume_path(self, pool, volume):
"""Return the path to the file from `pool` and `volume`."""
- return self.run(["vol-path", volume, "--pool", pool])
+ output = self.run(["vol-path", volume, "--pool", pool])
+ return output.strip()
def attach_local_volume(self, domain, pool, volume, device):
"""Attach `volume` in `pool` to `domain` as `device`."""
@@ -966,7 +973,8 @@ class VirshSSH(pexpect.spawn):
def get_network_list(self):
"""Return the list of available networks."""
- return self.run(["net-list", "--name"]).splitlines()
+ output = self.run(["net-list", "--name"])
+ return output.strip().splitlines()
def check_network_maas_dhcp_enabled(self, network, host_interfaces):
xml = self.get_network_xml(network)
@@ -1104,10 +1112,17 @@ class VirshSSH(pexpect.spawn):
f.write(device_xml.encode("utf-8"))
f.write(b"\n")
f.flush()
- try:
- self.run(["attach-device", domain, f.name, "--config"])
- except VirshError:
+ output = self.run(["attach-device", domain, f.name, "--config"])
+ if output.startswith("error:"):
+ maaslog.error(
+ "%s: Failed to attach network device %s"
+ % (domain, interface.attach_name)
+ )
return False
+ maaslog.info(
+ "%s: Successfully attached network device %s"
+ % (domain, interface.attach_name)
+ )
return True
def get_domain_capabilities(self):
@@ -1116,13 +1131,23 @@ class VirshSSH(pexpect.spawn):
Determines the type and emulator of the domain to use.
"""
# Test for KVM support first.
- emulator_type = "kvm"
- try:
- xml = self.run(["domcapabilities", "--virttype", emulator_type])
- except VirshError:
+ xml = self.run(["domcapabilities", "--virttype", "kvm"])
+ if xml.startswith("error"):
# Fallback to qemu support. Fail if qemu not supported.
+ xml = self.run(["domcapabilities", "--virttype", "qemu"])
emulator_type = "qemu"
- xml = self.run(["domcapabilities", "--virttype", emulator_type])
+ else:
+ emulator_type = "kvm"
+
+ # XXX newell 2017-05-18 bug=1690781
+ # Check to see if the XML output was an error.
+ # See bug for details about why and how this can occur.
+ if xml.startswith("error"):
+ raise VirshError(
+ "`virsh domcapabilities --virttype %s` errored. Please "
+ "verify that package qemu-kvm is installed and restart "
+ "libvirt-bin service." % emulator_type
+ )
doc = etree.XML(xml)
evaluator = etree.XPathEvaluator(doc)
@@ -1245,9 +1270,8 @@ class VirshSSH(pexpect.spawn):
def delete_domain(self, domain):
"""Delete `domain` and its volumes."""
- # Ensure that its destroyed first. We ignore errors here not to fail
- # the process, and since there isn't much we can do anyway.
- self.run(["destroy", domain], raise_error=False)
+        # Ensure that it's destroyed first.
+ self.run(["destroy", domain])
# Undefine the domains and remove all storage and snapshots.
# XXX newell 2018-02-25 bug=1741165
# Removed the --delete-snapshots flag to workaround the volumes not
@@ -1259,8 +1283,7 @@ class VirshSSH(pexpect.spawn):
"--remove-all-storage",
"--managed-save",
"--nvram",
- ],
- raise_error=False,
+ ]
)
diff --git a/src/provisioningserver/drivers/power/proxmox.py b/src/provisioningserver/drivers/power/proxmox.py
index cc0b720..7e141e9 100644
--- a/src/provisioningserver/drivers/power/proxmox.py
+++ b/src/provisioningserver/drivers/power/proxmox.py
@@ -5,7 +5,6 @@
from io import BytesIO
import json
-import re
from urllib.parse import urlencode, urlparse
from twisted.internet.defer import inlineCallbacks, succeed
@@ -21,18 +20,17 @@ from provisioningserver.drivers.power import PowerActionError
from provisioningserver.drivers.power.webhook import (
SSL_INSECURE_CHOICES,
SSL_INSECURE_NO,
- SSL_INSECURE_YES,
WebhookPowerDriver,
)
-from provisioningserver.rpc.utils import commission_node, create_node
from provisioningserver.utils.twisted import asynchronous
class ProxmoxPowerDriver(WebhookPowerDriver):
name = "proxmox"
- chassis = True
- can_probe = True
+ chassis = False
+ # XXX ltrager - 2021-01-11 - Support for probing and Pods could be added.
+ can_probe = False
description = "Proxmox"
settings = [
make_setting_field(
@@ -54,7 +52,7 @@ class ProxmoxPowerDriver(WebhookPowerDriver):
field_type="password",
),
make_setting_field(
- "power_vm_name", "Node ID", scope=SETTING_SCOPE.NODE, required=True
+ "power_vm_name", "Node ID", scope=SETTING_SCOPE.NODE
),
make_setting_field(
"power_verify_ssl",
@@ -119,7 +117,7 @@ class ProxmoxPowerDriver(WebhookPowerDriver):
{},
{b"Content-Type": [b"application/json; charset=utf-8"]},
),
- context.get("power_verify_ssl") == SSL_INSECURE_YES,
+ context.get("power_verify_ssl") is True,
FileBodyProducer(
BytesIO(
json.dumps(
@@ -152,17 +150,12 @@ class ProxmoxPowerDriver(WebhookPowerDriver):
b"GET",
self._get_url(context, "cluster/resources", {"type": "vm"}),
self._make_auth_headers(system_id, {}, extra_headers),
- context.get("power_verify_ssl") == SSL_INSECURE_YES,
+ context.get("power_verify_ssl") is True,
)
def cb(response_data):
parsed_data = json.loads(response_data)
- vms = parsed_data["data"]
- if not vms:
- raise PowerActionError(
- "No VMs returned! Are permissions set correctly?"
- )
- for vm in vms:
+ for vm in parsed_data["data"]:
if power_vm_name in (str(vm.get("vmid")), vm.get("name")):
return vm
raise PowerActionError("Unable to find virtual machine")
@@ -184,7 +177,7 @@ class ProxmoxPowerDriver(WebhookPowerDriver):
"status/start",
),
self._make_auth_headers(system_id, {}, extra_headers),
- context.get("power_verify_ssl") == SSL_INSECURE_YES,
+ context.get("power_verify_ssl") is True,
)
@asynchronous
@@ -201,7 +194,7 @@ class ProxmoxPowerDriver(WebhookPowerDriver):
"status/stop",
),
self._make_auth_headers(system_id, {}, extra_headers),
- context.get("power_verify_ssl") == SSL_INSECURE_YES,
+ context.get("power_verify_ssl") is True,
)
@asynchronous
@@ -215,114 +208,3 @@ class ProxmoxPowerDriver(WebhookPowerDriver):
return "off"
else:
return "unknown"
-
-
-def probe_proxmox_and_enlist(
- user,
- hostname,
- username,
- password,
- token_name,
- token_secret,
- verify_ssl,
- accept_all,
- domain,
- prefix_filter,
-):
- """Extracts all of the VMs from Proxmox and enlists them into MAAS.
-
- :param user: user for the nodes.
- :param hostname: Hostname for Proxmox
- :param username: The username to connect to Proxmox to
- :param password: The password to connect to Proxmox with.
- :param token_name: The name of the token to use instead of a password.
- :param token_secret: The token secret to use instead of a password.
- :param verify_ssl: Whether SSL connections should be verified.
- :param accept_all: If True, commission enlisted nodes.
- :param domain: What domain discovered machines to be apart of.
- :param prefix_filter: only enlist nodes that have the prefix.
- """
- proxmox = ProxmoxPowerDriver()
- context = {
- "power_address": hostname,
- "power_user": username,
- "power_pass": password,
- "power_token_name": token_name,
- "power_token_secret": token_secret,
- "power_verify_ssl": SSL_INSECURE_YES
- if verify_ssl
- else SSL_INSECURE_NO,
- }
- mac_regex = re.compile(r"(([\dA-F]{2}[:]){5}[\dA-F]{2})", re.I)
-
- d = proxmox._login("", context)
-
- @inlineCallbacks
- def get_vms(extra_headers):
- vms = yield proxmox._webhook_request(
- b"GET",
- proxmox._get_url(context, "cluster/resources", {"type": "vm"}),
- proxmox._make_auth_headers("", {}, extra_headers),
- verify_ssl,
- )
- return extra_headers, vms
-
- d.addCallback(get_vms)
-
- @inlineCallbacks
- def process_vms(data):
- extra_headers, response_data = data
- vms = json.loads(response_data)["data"]
- if not vms:
- raise PowerActionError(
- "No VMs returned! Are permissions set correctly?"
- )
- for vm in vms:
- if prefix_filter and not vm["name"].startswith(prefix_filter):
- continue
- # Proxmox doesn't have an easy way to get the MAC address, it
- # includes it with a bunch of other data in the config.
- vm_config_data = yield proxmox._webhook_request(
- b"GET",
- proxmox._get_url(
- context,
- f"nodes/{vm['node']}/{vm['type']}/{vm['vmid']}/config",
- ),
- proxmox._make_auth_headers("", {}, extra_headers),
- verify_ssl,
- )
- macs = [
- mac[0] for mac in mac_regex.findall(vm_config_data.decode())
- ]
-
- system_id = yield create_node(
- macs,
- "amd64",
- "proxmox",
- {"power_vm_name": vm["vmid"], **context},
- domain,
- hostname=vm["name"].replace(" ", "-"),
- )
-
- # If the system_id is None an error occured when creating the machine.
- # Most likely the error is the node already exists.
- if system_id is None:
- continue
-
- if vm["status"] != "stopped":
- yield proxmox._webhook_request(
- b"POST",
- proxmox._get_url(
- context,
- f"nodes/{vm['node']}/{vm['type']}/{vm['vmid']}/"
- "status/stop",
- ),
- proxmox._make_auth_headers(system_id, {}, extra_headers),
- context.get("power_verify_ssl") == SSL_INSECURE_YES,
- )
-
- if accept_all:
- yield commission_node(system_id, user)
-
- d.addCallback(process_vms)
- return d
diff --git a/src/provisioningserver/drivers/power/tests/test_proxmox.py b/src/provisioningserver/drivers/power/tests/test_proxmox.py
index 2903e8c..881ac5f 100644
--- a/src/provisioningserver/drivers/power/tests/test_proxmox.py
+++ b/src/provisioningserver/drivers/power/tests/test_proxmox.py
@@ -4,22 +4,16 @@
"""Tests for `provisioningserver.drivers.power.proxmox`."""
import json
import random
-from unittest.mock import ANY, call
+from unittest.mock import ANY
from testtools import ExpectedException
from twisted.internet.defer import inlineCallbacks, succeed
from maastesting.factory import factory
-from maastesting.matchers import (
- MockCalledOnceWith,
- MockCalledWith,
- MockCallsMatch,
- MockNotCalled,
-)
+from maastesting.matchers import MockCalledOnceWith, MockNotCalled
from maastesting.testcase import MAASTestCase, MAASTwistedRunTest
from provisioningserver.drivers.power import PowerActionError
import provisioningserver.drivers.power.proxmox as proxmox_module
-from provisioningserver.drivers.power.webhook import SSL_INSECURE_NO
class TestProxmoxPowerDriver(MAASTestCase):
@@ -108,17 +102,20 @@ class TestProxmoxPowerDriver(MAASTestCase):
},
extra_headers,
)
- self.mock_webhook_request.assert_called_once_with(
- b"POST",
- self.proxmox._get_url(context, "access/ticket"),
- self.proxmox._make_auth_headers(
- system_id,
- {},
- {b"Content-Type": [b"application/json; charset=utf-8"]},
+ self.assertThat(
+ self.mock_webhook_request,
+ MockCalledOnceWith(
+ b"POST",
+ self.proxmox._get_url(context, "access/ticket"),
+ self.proxmox._make_auth_headers(
+ system_id,
+ {},
+ {b"Content-Type": [b"application/json; charset=utf-8"]},
+ ),
+ False,
+ # unittest doesn't know how to compare FileBodyProducer
+ ANY,
),
- False,
- # unittest doesn't know how to compare FileBodyProducer
- ANY,
)
@inlineCallbacks
@@ -194,43 +191,16 @@ class TestProxmoxPowerDriver(MAASTestCase):
)
self.assertEqual(vm, found_vm)
- self.mock_webhook_request.assert_called_once_with(
- b"GET",
- self.proxmox._get_url(
- context, "cluster/resources", {"type": "vm"}
- ),
- self.proxmox._make_auth_headers(system_id, {}, extra_headers),
- False,
- )
-
- @inlineCallbacks
- def test_find_vm_doesnt_find_any_vms(self):
- system_id = factory.make_name("system_id")
- context = {
- "power_address": factory.make_name("power_address"),
- "power_vm_name": factory.make_name("power_vm_name"),
- }
- extra_headers = {
- factory.make_name("key").encode(): [
- factory.make_name("value").encode()
- ]
- for _ in range(3)
- }
- self.mock_webhook_request.return_value = succeed(
- json.dumps({"data": []})
- )
-
- with ExpectedException(
- PowerActionError, "No VMs returned! Are permissions set correctly?"
- ):
- yield self.proxmox._find_vm(system_id, context, extra_headers)
- self.mock_webhook_request.assert_called_once_with(
- b"GET",
- self.proxmox._get_url(
- context, "cluster/resources", {"type": "vm"}
+ self.assertThat(
+ self.mock_webhook_request,
+ MockCalledOnceWith(
+ b"GET",
+ self.proxmox._get_url(
+ context, "cluster/resources", {"type": "vm"}
+ ),
+ self.proxmox._make_auth_headers(system_id, {}, extra_headers),
+ False,
),
- self.proxmox._make_auth_headers(system_id, {}, extra_headers),
- False,
)
@inlineCallbacks
@@ -255,17 +225,18 @@ class TestProxmoxPowerDriver(MAASTestCase):
json.dumps({"data": [vm]})
)
- with ExpectedException(
- PowerActionError, "Unable to find virtual machine"
- ):
+ with ExpectedException(PowerActionError):
yield self.proxmox._find_vm(system_id, context, extra_headers)
- self.mock_webhook_request.assert_called_once_with(
- b"GET",
- self.proxmox._get_url(
- context, "cluster/resources", {"type": "vm"}
+ self.assertThat(
+ self.mock_webhook_request,
+ MockCalledOnceWith(
+ b"GET",
+ self.proxmox._get_url(
+ context, "cluster/resources", {"type": "vm"}
+ ),
+ self.proxmox._make_auth_headers(system_id, {}, extra_headers),
+ False,
),
- self.proxmox._make_auth_headers(system_id, {}, extra_headers),
- False,
)
@inlineCallbacks
@@ -291,15 +262,18 @@ class TestProxmoxPowerDriver(MAASTestCase):
yield self.proxmox.power_on(system_id, context)
- self.mock_webhook_request.assert_called_once_with(
- b"POST",
- self.proxmox._get_url(
- context,
- f"nodes/{vm['node']}/{vm['type']}/{vm['vmid']}/"
- "status/start",
+ self.assertThat(
+ self.mock_webhook_request,
+ MockCalledOnceWith(
+ b"POST",
+ self.proxmox._get_url(
+ context,
+ f"nodes/{vm['node']}/{vm['type']}/{vm['vmid']}/"
+ "status/start",
+ ),
+ self.proxmox._make_auth_headers(system_id, {}, extra_headers),
+ False,
),
- self.proxmox._make_auth_headers(system_id, {}, extra_headers),
- False,
)
@inlineCallbacks
@@ -350,14 +324,18 @@ class TestProxmoxPowerDriver(MAASTestCase):
yield self.proxmox.power_off(system_id, context)
- self.mock_webhook_request.assert_called_once_with(
- b"POST",
- self.proxmox._get_url(
- context,
- f"nodes/{vm['node']}/{vm['type']}/{vm['vmid']}/" "status/stop",
+ self.assertThat(
+ self.mock_webhook_request,
+ MockCalledOnceWith(
+ b"POST",
+ self.proxmox._get_url(
+ context,
+ f"nodes/{vm['node']}/{vm['type']}/{vm['vmid']}/"
+ "status/stop",
+ ),
+ self.proxmox._make_auth_headers(system_id, {}, extra_headers),
+ False,
),
- self.proxmox._make_auth_headers(system_id, {}, extra_headers),
- False,
)
@inlineCallbacks
@@ -459,406 +437,3 @@ class TestProxmoxPowerDriver(MAASTestCase):
status = yield self.proxmox.power_query(system_id, context)
self.assertEqual("unknown", status)
-
-
-class TestProxmoxProbeAndEnlist(MAASTestCase):
-
- run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)
-
- def setUp(self):
- super().setUp()
- self.mock_login = self.patch(
- proxmox_module.ProxmoxPowerDriver, "_login"
- )
- self.mock_login.return_value = succeed({})
- self.system_id = factory.make_name("system_id")
- self.mock_create_node = self.patch(proxmox_module, "create_node")
- self.mock_create_node.return_value = succeed(self.system_id)
- self.mock_commission_node = self.patch(
- proxmox_module, "commission_node"
- )
- self.mock_commission_node.return_value = succeed(None)
-
- @inlineCallbacks
- def test_probe_and_enlist(self):
- user = factory.make_name("user")
- hostname = factory.make_ipv4_address()
- username = factory.make_name("username")
- password = factory.make_name("password")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- domain = factory.make_name("domain")
- node1 = factory.make_name("node1")
- vmid1 = random.randint(0, 100)
- mac11 = factory.make_mac_address()
- mac12 = factory.make_mac_address()
- node2 = factory.make_name("node2")
- vmid2 = random.randint(0, 100)
- mac21 = factory.make_mac_address()
- mac22 = factory.make_mac_address()
- mock_webhook_request = self.patch(
- proxmox_module.ProxmoxPowerDriver, "_webhook_request"
- )
- mock_webhook_request.side_effect = [
- succeed(
- json.dumps(
- {
- "data": [
- {
- "node": node1,
- "vmid": vmid1,
- "name": f"vm {vmid1}",
- "type": "qemu",
- "status": "stopped",
- },
- {
- "node": node2,
- "vmid": vmid2,
- "name": f"vm {vmid2}",
- "type": "qemu",
- "status": "stopped",
- },
- ]
- }
- ).encode()
- ),
- succeed(
- b"{'data': {"
- b"'net1':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"'net2':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"}}" % (mac11.encode(), mac12.encode())
- ),
- succeed(
- b"{'data': {"
- b"'net1':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"'net2':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"}}" % (mac21.encode(), mac22.encode())
- ),
- ]
-
- yield proxmox_module.probe_proxmox_and_enlist(
- user,
- hostname,
- username,
- password,
- token_name,
- token_secret,
- False,
- False,
- domain,
- None,
- )
-
- self.assertThat(
- self.mock_create_node,
- MockCallsMatch(
- call(
- [mac11, mac12],
- "amd64",
- "proxmox",
- {
- "power_vm_name": vmid1,
- "power_address": hostname,
- "power_user": username,
- "power_pass": password,
- "power_token_name": token_name,
- "power_token_secret": token_secret,
- "power_verify_ssl": SSL_INSECURE_NO,
- },
- domain,
- hostname=f"vm-{vmid1}",
- ),
- call(
- [mac21, mac22],
- "amd64",
- "proxmox",
- {
- "power_vm_name": vmid2,
- "power_address": hostname,
- "power_user": username,
- "power_pass": password,
- "power_token_name": token_name,
- "power_token_secret": token_secret,
- "power_verify_ssl": SSL_INSECURE_NO,
- },
- domain,
- hostname=f"vm-{vmid2}",
- ),
- ),
- )
- self.assertThat(self.mock_commission_node, MockNotCalled())
-
- @inlineCallbacks
- def test_probe_and_enlist_doesnt_find_any_vms(self):
- user = factory.make_name("user")
- hostname = factory.make_ipv4_address()
- username = factory.make_name("username")
- password = factory.make_name("password")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- domain = factory.make_name("domain")
- mock_webhook_request = self.patch(
- proxmox_module.ProxmoxPowerDriver, "_webhook_request"
- )
- mock_webhook_request.return_value = succeed(json.dumps({"data": []}))
-
- with ExpectedException(
- PowerActionError, "No VMs returned! Are permissions set correctly?"
- ):
- yield proxmox_module.probe_proxmox_and_enlist(
- user,
- hostname,
- username,
- password,
- token_name,
- token_secret,
- False,
- False,
- domain,
- None,
- )
-
- @inlineCallbacks
- def test_probe_and_enlist_filters(self):
- user = factory.make_name("user")
- hostname = factory.make_ipv4_address()
- username = factory.make_name("username")
- password = factory.make_name("password")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- domain = factory.make_name("domain")
- node1 = factory.make_name("node1")
- mac11 = factory.make_mac_address()
- mac12 = factory.make_mac_address()
- node2 = factory.make_name("node2")
- mac21 = factory.make_mac_address()
- mac22 = factory.make_mac_address()
- mock_webhook_request = self.patch(
- proxmox_module.ProxmoxPowerDriver, "_webhook_request"
- )
- mock_webhook_request.side_effect = [
- succeed(
- json.dumps(
- {
- "data": [
- {
- "node": node1,
- "vmid": 100,
- "name": f"vm 100",
- "type": "qemu",
- "status": "stopped",
- },
- {
- "node": node2,
- "vmid": 200,
- "name": f"vm 200",
- "type": "qemu",
- "status": "stopped",
- },
- ]
- }
- ).encode()
- ),
- succeed(
- b"{'data': {"
- b"'net1':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"'net2':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"}}" % (mac11.encode(), mac12.encode())
- ),
- succeed(
- b"{'data': {"
- b"'net1':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"'net2':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"}}" % (mac21.encode(), mac22.encode())
- ),
- ]
-
- yield proxmox_module.probe_proxmox_and_enlist(
- user,
- hostname,
- username,
- password,
- token_name,
- token_secret,
- False,
- False,
- domain,
- "vm 1",
- )
-
- self.assertThat(
- self.mock_create_node,
- MockCalledOnceWith(
- [mac11, mac12],
- "amd64",
- "proxmox",
- {
- "power_vm_name": 100,
- "power_address": hostname,
- "power_user": username,
- "power_pass": password,
- "power_token_name": token_name,
- "power_token_secret": token_secret,
- "power_verify_ssl": SSL_INSECURE_NO,
- },
- domain,
- hostname=f"vm-100",
- ),
- )
- self.assertThat(self.mock_commission_node, MockNotCalled())
-
- @inlineCallbacks
- def test_probe_and_enlist_stops_and_commissions(self):
- user = factory.make_name("user")
- hostname = factory.make_ipv4_address()
- username = factory.make_name("username")
- password = factory.make_name("password")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- domain = factory.make_name("domain")
- node1 = factory.make_name("node1")
- vmid1 = random.randint(0, 100)
- mac11 = factory.make_mac_address()
- mac12 = factory.make_mac_address()
- mock_webhook_request = self.patch(
- proxmox_module.ProxmoxPowerDriver, "_webhook_request"
- )
- mock_webhook_request.side_effect = [
- succeed(
- json.dumps(
- {
- "data": [
- {
- "node": node1,
- "vmid": vmid1,
- "name": f"vm {vmid1}",
- "type": "qemu",
- "status": "running",
- },
- ]
- }
- ).encode()
- ),
- succeed(
- b"{'data': {"
- b"'net1':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"'net2':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"}}" % (mac11.encode(), mac12.encode())
- ),
- succeed(None),
- ]
-
- yield proxmox_module.probe_proxmox_and_enlist(
- user,
- hostname,
- username,
- password,
- token_name,
- token_secret,
- False,
- True,
- domain,
- None,
- )
-
- self.assertThat(
- self.mock_create_node,
- MockCalledOnceWith(
- [mac11, mac12],
- "amd64",
- "proxmox",
- {
- "power_vm_name": vmid1,
- "power_address": hostname,
- "power_user": username,
- "power_pass": password,
- "power_token_name": token_name,
- "power_token_secret": token_secret,
- "power_verify_ssl": SSL_INSECURE_NO,
- },
- domain,
- hostname=f"vm-{vmid1}",
- ),
- )
- self.assertThat(
- mock_webhook_request, MockCalledWith(b"POST", ANY, ANY, False)
- )
- self.assertThat(
- self.mock_commission_node, MockCalledOnceWith(self.system_id, user)
- )
-
- @inlineCallbacks
- def test_probe_and_enlist_ignores_create_node_error(self):
- user = factory.make_name("user")
- hostname = factory.make_ipv4_address()
- username = factory.make_name("username")
- password = factory.make_name("password")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- domain = factory.make_name("domain")
- node1 = factory.make_name("node1")
- vmid1 = random.randint(0, 100)
- mac11 = factory.make_mac_address()
- mac12 = factory.make_mac_address()
- self.mock_create_node.return_value = succeed(None)
- mock_webhook_request = self.patch(
- proxmox_module.ProxmoxPowerDriver, "_webhook_request"
- )
- mock_webhook_request.side_effect = [
- succeed(
- json.dumps(
- {
- "data": [
- {
- "node": node1,
- "vmid": vmid1,
- "name": f"vm {vmid1}",
- "type": "qemu",
- "status": "running",
- },
- ]
- }
- ).encode()
- ),
- succeed(
- b"{'data': {"
- b"'net1':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"'net2':'virtio=%s,bridge=vmbr0,firewall=1'"
- b"}}" % (mac11.encode(), mac12.encode())
- ),
- succeed(None),
- ]
-
- yield proxmox_module.probe_proxmox_and_enlist(
- user,
- hostname,
- username,
- password,
- token_name,
- token_secret,
- False,
- True,
- domain,
- None,
- )
-
- self.assertThat(
- self.mock_create_node,
- MockCalledOnceWith(
- [mac11, mac12],
- "amd64",
- "proxmox",
- {
- "power_vm_name": vmid1,
- "power_address": hostname,
- "power_user": username,
- "power_pass": password,
- "power_token_name": token_name,
- "power_token_secret": token_secret,
- "power_verify_ssl": SSL_INSECURE_NO,
- },
- domain,
- hostname=f"vm-{vmid1}",
- ),
- )
- self.assertThat(self.mock_commission_node, MockNotCalled())
diff --git a/src/provisioningserver/drivers/power/webhook.py b/src/provisioningserver/drivers/power/webhook.py
index 22c3d04..2deccac 100644
--- a/src/provisioningserver/drivers/power/webhook.py
+++ b/src/provisioningserver/drivers/power/webhook.py
@@ -180,7 +180,7 @@ class WebhookPowerDriver(PowerDriver):
b"POST",
context["power_on_uri"].encode(),
self._make_auth_headers(system_id, context),
- context.get("power_verify_ssl") == SSL_INSECURE_YES,
+ context.get("power_verify_ssl") is True,
)
@asynchronous
@@ -191,7 +191,7 @@ class WebhookPowerDriver(PowerDriver):
b"POST",
context["power_off_uri"].encode(),
self._make_auth_headers(system_id, context),
- context.get("power_verify_ssl") == SSL_INSECURE_YES,
+ context.get("power_verify_ssl") is True,
)
@asynchronous
@@ -205,7 +205,7 @@ class WebhookPowerDriver(PowerDriver):
b"GET",
context["power_query_uri"].encode(),
self._make_auth_headers(system_id, context),
- context.get("power_verify_ssl") == SSL_INSECURE_YES,
+ context.get("power_verify_ssl") is True,
)
node_data = node_data.decode()
if power_on_regex and re.search(power_on_regex, node_data) is not None:
diff --git a/src/provisioningserver/rpc/cluster.py b/src/provisioningserver/rpc/cluster.py
index b2def33..4b67c45 100644
--- a/src/provisioningserver/rpc/cluster.py
+++ b/src/provisioningserver/rpc/cluster.py
@@ -1,4 +1,4 @@
-# Copyright 2014-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2014-2020 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""RPC declarations for clusters.
@@ -745,9 +745,6 @@ class AddChassis(amp.Command):
(b"power_control", amp.Unicode(optional=True)),
(b"port", amp.Integer(optional=True)),
(b"protocol", amp.Unicode(optional=True)),
- (b"token_name", amp.Unicode(optional=True)),
- (b"token_secret", amp.Unicode(optional=True)),
- (b"verify_ssl", amp.Boolean(optional=True)),
]
errors = {}
diff --git a/src/provisioningserver/rpc/clusterservice.py b/src/provisioningserver/rpc/clusterservice.py
index 279a804..bc8f429 100644
--- a/src/provisioningserver/rpc/clusterservice.py
+++ b/src/provisioningserver/rpc/clusterservice.py
@@ -1,4 +1,4 @@
-# Copyright 2014-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2014-2020 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""RPC implementation for clusters."""
@@ -49,7 +49,6 @@ from provisioningserver.drivers.hardware.vmware import probe_vmware_and_enlist
from provisioningserver.drivers.nos.registry import NOSDriverRegistry
from provisioningserver.drivers.power.mscm import probe_and_enlist_mscm
from provisioningserver.drivers.power.msftocs import probe_and_enlist_msftocs
-from provisioningserver.drivers.power.proxmox import probe_proxmox_and_enlist
from provisioningserver.drivers.power.recs import probe_and_enlist_recs
from provisioningserver.drivers.power.registry import PowerDriverRegistry
from provisioningserver.logger import get_maas_logger, LegacyLogger
@@ -786,9 +785,6 @@ class Cluster(RPCProtocol):
power_control=None,
port=None,
protocol=None,
- token_name=None,
- token_secret=None,
- verify_ssl=False,
):
"""AddChassis()
@@ -806,20 +802,6 @@ class Cluster(RPCProtocol):
domain,
)
d.addErrback(partial(catch_probe_and_enlist_error, "virsh"))
- elif chassis_type == "proxmox":
- d = probe_proxmox_and_enlist(
- user,
- hostname,
- username,
- password,
- token_name,
- token_secret,
- verify_ssl,
- accept_all,
- domain,
- prefix_filter,
- )
- d.addErrback(partial(catch_probe_and_enlist_error, "proxmox"))
elif chassis_type == "vmware":
d = deferToThread(
probe_vmware_and_enlist,
diff --git a/src/provisioningserver/rpc/tests/test_clusterservice.py b/src/provisioningserver/rpc/tests/test_clusterservice.py
index 3512523..647ed6d 100644
--- a/src/provisioningserver/rpc/tests/test_clusterservice.py
+++ b/src/provisioningserver/rpc/tests/test_clusterservice.py
@@ -1,4 +1,4 @@
-# Copyright 2014-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2014-2020 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for the cluster's RPC implementation."""
@@ -3486,96 +3486,6 @@ class TestClusterProtocol_AddChassis(MAASTestCase):
),
)
- def test_chassis_type_proxmox_calls_probe_proxmoxand_enlist(self):
- mock_proxmox = self.patch_autospec(
- clusterservice, "probe_proxmox_and_enlist"
- )
- user = factory.make_name("user")
- hostname = factory.make_hostname()
- username = factory.make_name("username")
- password = factory.make_name("password")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- verify_ssl = factory.pick_bool()
- accept_all = factory.pick_bool()
- domain = factory.make_name("domain")
- prefix_filter = factory.make_name("prefix_filter")
- call_responder(
- Cluster(),
- cluster.AddChassis,
- {
- "user": user,
- "chassis_type": "proxmox",
- "hostname": hostname,
- "username": username,
- "password": password,
- "token_name": token_name,
- "token_secret": token_secret,
- "verify_ssl": verify_ssl,
- "accept_all": accept_all,
- "domain": domain,
- "prefix_filter": prefix_filter,
- },
- )
- self.assertThat(
- mock_proxmox,
- MockCalledOnceWith(
- user,
- hostname,
- username,
- password,
- token_name,
- token_secret,
- verify_ssl,
- accept_all,
- domain,
- prefix_filter,
- ),
- )
-
- def test_chassis_type_proxmox_logs_error_to_maaslog(self):
- fake_error = factory.make_name("error")
- self.patch(clusterservice, "maaslog")
- mock_proxmox = self.patch_autospec(
- clusterservice, "probe_proxmox_and_enlist"
- )
- mock_proxmox.return_value = fail(Exception(fake_error))
- user = factory.make_name("user")
- hostname = factory.make_hostname()
- username = factory.make_name("username")
- password = factory.make_name("password")
- token_name = factory.make_name("token_name")
- token_secret = factory.make_name("token_secret")
- verify_ssl = factory.pick_bool()
- accept_all = factory.pick_bool()
- domain = factory.make_name("domain")
- prefix_filter = factory.make_name("prefix_filter")
- call_responder(
- Cluster(),
- cluster.AddChassis,
- {
- "user": user,
- "chassis_type": "proxmox",
- "hostname": hostname,
- "username": username,
- "password": password,
- "token_name": token_name,
- "token_secret": token_secret,
- "verify_ssl": verify_ssl,
- "accept_all": accept_all,
- "domain": domain,
- "prefix_filter": prefix_filter,
- },
- )
- self.assertThat(
- clusterservice.maaslog.error,
- MockAnyCall(
- "Failed to probe and enlist %s nodes: %s",
- "proxmox",
- fake_error,
- ),
- )
-
def test_chassis_type_vmware_calls_probe_vmware_and_enlist(self):
mock_deferToThread = self.patch_autospec(
clusterservice, "deferToThread"
diff --git a/src/provisioningserver/templates/dns/named.conf.options.inside.maas.template b/src/provisioningserver/templates/dns/named.conf.options.inside.maas.template
index d76fcfa..ba1aee3 100644
--- a/src/provisioningserver/templates/dns/named.conf.options.inside.maas.template
+++ b/src/provisioningserver/templates/dns/named.conf.options.inside.maas.template
@@ -7,7 +7,7 @@ forwarders {
{{endif}}
dnssec-validation {{dnssec_validation}};
-empty-zones-enable no;
+
{{if not upstream_allow_query}}
allow-query { any; };
diff --git a/src/provisioningserver/templates/uefi/config.commissioning.amd64.template b/src/provisioningserver/templates/uefi/config.commissioning.amd64.template
new file mode 100644
index 0000000..47dbec9
--- /dev/null
+++ b/src/provisioningserver/templates/uefi/config.commissioning.amd64.template
@@ -0,0 +1,8 @@
+set default="0"
+set timeout=0
+
+menuentry 'Commission' {
+ echo 'Booting under MAAS direction...'
+ linuxefi {{kernel_params | fs_efihost}}{{kernel_params | kernel_path }} {{kernel_params | kernel_command}} BOOTIF=01-${net_default_mac}
+ initrdefi {{kernel_params | fs_efihost}}{{kernel_params | initrd_path }}
+}
diff --git a/src/provisioningserver/templates/uefi/config.commissioning.template b/src/provisioningserver/templates/uefi/config.commissioning.template
index cc3f2fc..c860a79 100644
--- a/src/provisioningserver/templates/uefi/config.commissioning.template
+++ b/src/provisioningserver/templates/uefi/config.commissioning.template
@@ -1,8 +1,7 @@
-{{if debug}}set debug="all"{{endif}}
set default="0"
set timeout=0
-menuentry 'Ephemeral' {
+menuentry 'Commission' {
echo 'Booting under MAAS direction...'
linux {{kernel_params | fs_efihost}}{{kernel_params | kernel_path }} {{kernel_params | kernel_command}} BOOTIF=01-${net_default_mac}
initrd {{kernel_params | fs_efihost}}{{kernel_params | initrd_path }}
diff --git a/src/provisioningserver/templates/uefi/config.enlist.amd64.template b/src/provisioningserver/templates/uefi/config.enlist.amd64.template
new file mode 100644
index 0000000..edf800e
--- /dev/null
+++ b/src/provisioningserver/templates/uefi/config.enlist.amd64.template
@@ -0,0 +1,8 @@
+set default="0"
+set timeout=0
+
+menuentry 'Enlist' {
+ echo 'Booting under MAAS direction...'
+ linuxefi {{kernel_params | fs_efihost}}{{kernel_params | kernel_path }} {{kernel_params | kernel_command}} BOOTIF=01-${net_default_mac}
+ initrdefi {{kernel_params | fs_efihost}}{{kernel_params | initrd_path }}
+}
diff --git a/src/provisioningserver/templates/uefi/config.enlist.template b/src/provisioningserver/templates/uefi/config.enlist.template
deleted file mode 120000
index f0f1b87..0000000
--- a/src/provisioningserver/templates/uefi/config.enlist.template
+++ /dev/null
@@ -1 +0,0 @@
-config.commissioning.template
\ No newline at end of file
diff --git a/src/provisioningserver/templates/uefi/config.enlist.template b/src/provisioningserver/templates/uefi/config.enlist.template
new file mode 100644
index 0000000..3546960
--- /dev/null
+++ b/src/provisioningserver/templates/uefi/config.enlist.template
@@ -0,0 +1,8 @@
+set default="0"
+set timeout=0
+
+menuentry 'Enlist' {
+ echo 'Booting under MAAS direction...'
+ linux {{kernel_params | fs_efihost}}{{kernel_params | kernel_path }} {{kernel_params | kernel_command}} BOOTIF=01-${net_default_mac}
+ initrd {{kernel_params | fs_efihost}}{{kernel_params | initrd_path }}
+}
diff --git a/src/provisioningserver/templates/uefi/config.install.amd64.template b/src/provisioningserver/templates/uefi/config.install.amd64.template
new file mode 100644
index 0000000..eea84e0
--- /dev/null
+++ b/src/provisioningserver/templates/uefi/config.install.amd64.template
@@ -0,0 +1,8 @@
+set default="0"
+set timeout=0
+
+menuentry 'Install' {
+ echo 'Booting under MAAS direction...'
+ linuxefi {{kernel_params | fs_efihost}}{{kernel_params | kernel_path }} {{kernel_params | kernel_command}} BOOTIF=01-${net_default_mac}
+ initrdefi {{kernel_params | fs_efihost}}{{kernel_params | initrd_path }}
+}
diff --git a/src/provisioningserver/templates/uefi/config.install.template b/src/provisioningserver/templates/uefi/config.install.template
new file mode 100644
index 0000000..7bd9538
--- /dev/null
+++ b/src/provisioningserver/templates/uefi/config.install.template
@@ -0,0 +1,8 @@
+set default="0"
+set timeout=0
+
+menuentry 'Install' {
+ echo 'Booting under MAAS direction...'
+ linux {{kernel_params | fs_efihost}}{{kernel_params | kernel_path }} {{kernel_params | kernel_command}} BOOTIF=01-${net_default_mac}
+ initrd {{kernel_params | fs_efihost}}{{kernel_params | initrd_path }}
+}
diff --git a/src/provisioningserver/templates/uefi/config.local.amd64.template b/src/provisioningserver/templates/uefi/config.local.amd64.template
index 7cfc382..c7d3d4c 100644
--- a/src/provisioningserver/templates/uefi/config.local.amd64.template
+++ b/src/provisioningserver/templates/uefi/config.local.amd64.template
@@ -1,4 +1,3 @@
-{{if debug}}set debug="all"{{endif}}
set default="0"
set timeout=0
diff --git a/src/provisioningserver/templates/uefi/config.local.arm64.template b/src/provisioningserver/templates/uefi/config.local.arm64.template
index ab10c41..cb34b1b 100644
--- a/src/provisioningserver/templates/uefi/config.local.arm64.template
+++ b/src/provisioningserver/templates/uefi/config.local.arm64.template
@@ -1,4 +1,3 @@
-{{if debug}}set debug="all"{{endif}}
set default="0"
set timeout=0
diff --git a/src/provisioningserver/templates/uefi/config.local.ppc64el.template b/src/provisioningserver/templates/uefi/config.local.ppc64el.template
index 406451b..b592f26 100644
--- a/src/provisioningserver/templates/uefi/config.local.ppc64el.template
+++ b/src/provisioningserver/templates/uefi/config.local.ppc64el.template
@@ -1,4 +1,3 @@
-{{if debug}}set debug="all"{{endif}}
set default="0"
set timeout=0
diff --git a/src/provisioningserver/templates/uefi/config.poweroff.template b/src/provisioningserver/templates/uefi/config.poweroff.template
index 95016c7..451be03 100644
--- a/src/provisioningserver/templates/uefi/config.poweroff.template
+++ b/src/provisioningserver/templates/uefi/config.poweroff.template
@@ -1,4 +1,3 @@
-{{if debug}}set debug="all"{{endif}}
set default="0"
set timeout=0
diff --git a/src/provisioningserver/templates/uefi/config.xinstall.amd64.template b/src/provisioningserver/templates/uefi/config.xinstall.amd64.template
new file mode 120000
index 0000000..d321baa
--- /dev/null
+++ b/src/provisioningserver/templates/uefi/config.xinstall.amd64.template
@@ -0,0 +1 @@
+config.install.amd64.template
\ No newline at end of file
diff --git a/src/provisioningserver/templates/uefi/config.xinstall.template b/src/provisioningserver/templates/uefi/config.xinstall.template
index f0f1b87..44389e4 120000
--- a/src/provisioningserver/templates/uefi/config.xinstall.template
+++ b/src/provisioningserver/templates/uefi/config.xinstall.template
@@ -1 +1 @@
-config.commissioning.template
\ No newline at end of file
+config.install.template
\ No newline at end of file
diff --git a/src/provisioningserver/tests/test_config.py b/src/provisioningserver/tests/test_config.py
index 90c4841..1488283 100644
--- a/src/provisioningserver/tests/test_config.py
+++ b/src/provisioningserver/tests/test_config.py
@@ -1,4 +1,4 @@
-# Copyright 2012-2021 Canonical Ltd. This software is licensed under the
+# Copyright 2012-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for provisioning configuration."""
@@ -29,7 +29,6 @@ from maastesting.factory import factory
from maastesting.fixtures import ImportErrorFixture
from maastesting.matchers import MockCalledOnceWith, MockNotCalled
from maastesting.testcase import MAASTestCase
-from provisioningserver import config as config_module
from provisioningserver.config import (
ClusterConfiguration,
Configuration,
@@ -38,7 +37,6 @@ from provisioningserver.config import (
ConfigurationImmutable,
ConfigurationMeta,
ConfigurationOption,
- debug_enabled,
is_dev_environment,
)
from provisioningserver.path import get_data_path
@@ -726,30 +724,3 @@ class TestConfig(MAASTestCase):
def test_is_dev_environment_returns_true(self):
self.assertTrue(is_dev_environment())
-
-
-class TestDebugEnabled(MAASTestCase):
- """Tests for `debug_enabled`."""
-
- def setUp(self):
- super().setUp()
- # Make sure things aren't pulled from cache
- debug_enabled.cache_clear()
-
- def test_debug_enabled_false(self):
- # Verifies that the default state of debug is false.
- self.assertFalse(debug_enabled())
-
- def test_debug_enabled(self):
- debug = factory.pick_bool()
- self.useFixture(ClusterConfigurationFixture(debug=debug))
- self.assertEqual(debug, debug_enabled())
-
- def test_debug_enabled_cached(self):
- debug = factory.pick_bool()
- self.useFixture(ClusterConfigurationFixture(debug=debug))
- # Prime cache
- debug_enabled()
- mock_open = self.patch(config_module.ClusterConfiguration, "open")
- self.assertEqual(debug, debug_enabled())
- mock_open.assert_not_called()
Follow ups