
[Merge] ~andersson123/autopkgtest-cloud:lint_tools_and_scripts into autopkgtest-cloud:master

Tim Andersson has proposed merging ~andersson123/autopkgtest-cloud:lint_tools_and_scripts into autopkgtest-cloud:master.

Commit message:
Lint the tools and scripts of autopkgtest-cloud, and compartmentalise the linting changes for easier testing.

Requested reviews:
  Canonical's Ubuntu QA (canonical-ubuntu-qa)

For more details, see:
https://code.launchpad.net/~andersson123/autopkgtest-cloud/+git/autopkgtest-cloud/+merge/444160

Lint the tools and scripts of autopkgtest-cloud, and compartmentalise the linting changes for easier testing.
-- 
Your team Canonical's Ubuntu QA is requested to review the proposed merge of ~andersson123/autopkgtest-cloud:lint_tools_and_scripts into autopkgtest-cloud:master.
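
For review context: ci/lint_test (at the bottom of this diff) assigns each file to a linter before running it: by extension first, then by shebang for extensionless scripts, with YAML left to a directory-wide yamllint pass. A minimal sketch of that dispatch (the helper name classify is illustrative; the extension and shebang tables mirror the data dict in the script):

    import os

    def classify(path):
        '''Pick a linter for one file: extension first, then shebang.'''
        if path.endswith(".py"):
            return "pylint"
        if path.endswith((".sh", ".bash")):
            return "shellcheck"
        if os.path.isfile(path):
            with open(path, "r", encoding="utf-8") as f:
                first_line = f.readline().strip()
            if first_line == "#!/usr/bin/python3":
                return "pylint"
            if first_line in ("#!/bin/bash", "#!/bin/sh"):
                return "shellcheck"
        return None  # no match: left to yamllint's directory-wide pass
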
diff --git a/.launchpad.yaml b/.launchpad.yaml
index 6dca924..4f6669e 100755
--- a/.launchpad.yaml
+++ b/.launchpad.yaml
@@ -6,4 +6,4 @@ jobs:
     series: focal
     architectures: amd64
     packages: [pylint, python3, shellcheck, yamllint]
-    run: ./ci/lint_test
+    run: ./ci/lint_test -v
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/build-adt-image b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/build-adt-image
index be6b4d3..ca9097a 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/build-adt-image
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/build-adt-image
@@ -1,9 +1,11 @@
 #!/bin/bash
 # Build adt cloud images with create-nova-image-new-release for the given
 # cloud, release and arch
+# shellcheck disable=SC1090
 
 set -eu
 
+# shellcheck disable=SC2034
 IFS="[- ]" read -r RELEASE REGION ARCH bootstrap <<< "$@"
 
 if [ -z "${RELEASE}" ] || [ -z "${REGION}" ] || [ -z "${ARCH}" ]; then
@@ -12,8 +14,8 @@ if [ -z "${RELEASE}" ] || [ -z "${REGION}" ] || [ -z "${ARCH}" ]; then
 fi
 
 if [ -z "${MIRROR:-}" ]; then
-        if [ -e ~/mirror-${REGION}.rc ]; then
-                . ~/mirror-${REGION}.rc
+        if [ -e ~/mirror-"${REGION}".rc ]; then
+                . ~/mirror-"${REGION}".rc
         else
                 . ~/mirror.rc
         fi
@@ -24,10 +26,10 @@ export MIRROR
 export NET_NAME
 
 if [ -z "${USE_CLOUD_CONFIG_FROM_ENV:-}" ]; then
-        if [ -e ~/cloudrcs/${REGION}-${ARCH}.rc ]; then
-                . ~/cloudrcs/${REGION}-${ARCH}.rc
+        if [ -e ~/cloudrcs/"${REGION}"-"${ARCH}".rc ]; then
+                . ~/cloudrcs/"${REGION}"-"${ARCH}".rc
         else
-                . ~/cloudrcs/${REGION}.rc
+                . ~/cloudrcs/"${REGION}".rc
         fi
 fi
 
@@ -73,11 +75,12 @@ fi
 
 echo "$REGION-$ARCH: using image $IMG"
 KEYNAME=${KEYNAME:-testbed-$(hostname)}
-$(dirname $0)/create-nova-image-new-release $RELEASE $ARCH $IMG "${KEYNAME}" "$IMAGE_NAME"
+directory=$(dirname "${0}")
+"${directory}"/create-nova-image-new-release "${RELEASE}" "${ARCH}" "${IMG}" "${KEYNAME}" "${IMAGE_NAME}"
 # clean old images
-openstack image list --private -f value | grep --color=none -v "$IMAGE_NAME" | while read id img state; do
-        if $(echo ${img} | grep -qs "adt/ubuntu-${RELEASE}-${ARCH}") && [ ${state} = active ]; then
+openstack image list --private -f value | grep --color=none -v "$IMAGE_NAME" | while read -r id img state; do
+        if echo "${img}" | grep -qs "adt/ubuntu-${RELEASE}-${ARCH}" && [ "${state}" = active ]; then
                 echo "Cleaning up old image $img ($id)"
-                openstack image delete $id
+                openstack image delete "${id}"
         fi
 done
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/cleanup-instances b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/cleanup-instances
index 14caca8..ba5a64a 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/cleanup-instances
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/cleanup-instances
@@ -1,14 +1,17 @@
 #!/usr/bin/python3
-# clean up broken/orphaned instances
+'''
+clean up broken/orphaned instances
+'''
+# pylint: disable=import-error, invalid-name
 import logging
 import os
 import re
 import socket
 import subprocess
 import time
-from urllib.error import HTTPError
 
-import novaclient.client, novaclient.exceptions
+import novaclient.client
+import novaclient.exceptions
 from influxdb import InfluxDBClient
 from influxdb.exceptions import InfluxDBClientError
 from keystoneauth1 import session
@@ -78,8 +81,8 @@ for instance in nova.servers.list():
         time.strptime(instance.created, "%Y-%m-%dT%H:%M:%SZ")
     )
     logging.debug(
-        "%s: status %s, age %is, networks %s"
-        % (instance.name, instance.status, age, instance.networks)
+        "%s: status %s, age %is, networks %s",
+        instance.name, instance.status, age, instance.networks
     )
 
     # check state
@@ -88,10 +91,8 @@ for instance in nova.servers.list():
             message = str(instance.fault)
         except AttributeError:
             message = "fault message not available"
-        msg = "instance {} ({}) is in error state (message: {})".format(
-            instance.name, instance.id, message
-        )
-        logging.warning("{}, deleting".format(msg))
+        msg = f"instance {instance.name} ({instance.id}) is in error state (message: {message})"
+        logging.warning("%s, deleting", msg)
         measurements.append(
             {
                 "measurement": "autopkgtest_delete_event",
@@ -108,7 +109,6 @@ for instance in nova.servers.list():
             instance.delete()
         except novaclient.exceptions.NotFound:
             logging.warning("Couldn't delete instance: not found")
-            pass
         continue
 
     if not instance.name.startswith("adt-"):
@@ -122,10 +122,9 @@ for instance in nova.servers.list():
 
     # check age
     if age > MAX_AGE:
-        message = "instance {} ({}) is {:.1f} hours old, deleting".format(
-            instance.name, instance.id, (float(age) / 3600)
-        )
-        logging.warning("{}, deleting".format(message))
+        message = f"instance {instance.name} ({instance.id}) is " + \
+                  f"{float(age) / 3600:.1f} hours old, deleting"
+        logging.warning("%s, deleting", message)
         try:
             instance.delete()
             measurements.append(
@@ -142,26 +141,21 @@ for instance in nova.servers.list():
             )
         except novaclient.exceptions.NotFound:
             logging.warning("Couldn't delete instance: not found")
-            pass
-
     # check matching adt-run process for instance name
     try:
         if (
-            subprocess.call(
-                [
-                    "pgrep",
-                    "-f",
-                    "python.*autopkgtest.* --name %s"
-                    % re.escape(instance.name),
-                ],
-                stdout=subprocess.PIPE,
-            )
-            != 0
+                subprocess.call(
+                    [
+                        "pgrep",
+                        "-f",
+                        f"python.*autopkgtest.* --name {re.escape(instance.name)}",
+                    ],
+                    stdout=subprocess.PIPE,
+                )
+                != 0
         ):
-            message = "instance {} ({}) has no associated autopkgtest".format(
-                instance.name, instance.id
-            )
-            logging.warning("{}, deleting".format(message))
+            message = f"instance {instance.name} ({instance.id}) has no associated autopkgtest"
+            logging.warning("%s, deleting", message)
             try:
                 instance.delete()
                 measurements.append(
@@ -178,13 +172,11 @@ for instance in nova.servers.list():
                 )
             except novaclient.exceptions.NotFound:
                 logging.warning("Couldn't delete instance: not found")
-                pass
     except IndexError:
-        logging.warning("instance %s has invalid name" % instance.name)
+        logging.warning("instance %s has invalid name", instance.name)
 
     if measurements and influx_client:
         try:
             influx_client.write_points(measurements)
         except InfluxDBClientError as err:
-            logging.warning("Write to InfluxDB failed: %s" % err)
-            pass
+            logging.warning("Write to InfluxDB failed: %s", err)
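
The logging changes in this file swap eager %- and .format()-interpolation for logging's deferred formatting, so the message string is only built when a record is actually emitted. A minimal illustration (values invented):

    import logging

    logging.basicConfig(level=logging.WARNING)
    name, ident = "adt-lunar-amd64", "0b5e00a2"

    # Eager: the string is formatted even though DEBUG is filtered out.
    logging.debug("instance {} ({}) deleted".format(name, ident))
    # Deferred: arguments are interpolated only if the record is emitted.
    logging.warning("instance %s (%s) deleted", name, ident)
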
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/cleanup-lxd b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/cleanup-lxd
index 9b3d376..16790f1 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/cleanup-lxd
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/cleanup-lxd
@@ -1,4 +1,8 @@
 #!/usr/bin/python3
+'''
+Cleans up old lxd containers in autopkgtest-cloud
+'''
+# pylint: disable=invalid-name
 import glob
 import json
 import os
@@ -10,10 +14,12 @@ MINIMUM_AGE_MINS = 60
 
 
 def parse_lxd_time(s):
+    '''Takes time in iso format and converts to a normal format for comparison'''
     return datetime.datetime.fromisoformat(s.split(".")[0] + "+00:00")
 
 
 def check_remote(remote):
+    '''Checks how many containers we have and deletes containers that are too old'''
     now = datetime.datetime.now(datetime.timezone.utc)
     containers = json.loads(
         subprocess.check_output(["lxc", "list", "-fjson", remote + ":"])
@@ -34,7 +40,7 @@ def check_remote(remote):
 
     for container in containers[to_keep:]:
         if now - parse_lxd_time(container["created_at"]) >= datetime.timedelta(
-            minutes=MINIMUM_AGE_MINS
+                minutes=MINIMUM_AGE_MINS
         ):
             print(f"{remote}:{container['name']} is old - deleting", file=sys.stderr)
             subprocess.check_call(
@@ -43,6 +49,7 @@ def check_remote(remote):
 
 
 def main():
+    '''Main wrapper function for the node'''
     if not os.path.exists("/usr/bin/lxc"):
         return 0
 
@@ -53,6 +60,7 @@ def main():
             continue
 
         check_remote(remote)
+    return 0
 
 
 if __name__ == "__main__":
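
parse_lxd_time() drops the sub-second part of LXD's created_at timestamp (it carries more precision than fromisoformat() accepts on focal's Python 3.8) and pins UTC so the result can be compared with an aware now(). A worked example, assuming LXD's usual timestamp shape:

    import datetime

    def parse_lxd_time(s):
        # "2023-06-05T12:34:56.789012345Z" -> "2023-06-05T12:34:56+00:00"
        return datetime.datetime.fromisoformat(s.split(".")[0] + "+00:00")

    now = datetime.datetime.now(datetime.timezone.utc)
    age = now - parse_lxd_time("2023-06-05T12:34:56.789012345Z")
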
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/copy-security-group b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/copy-security-group
index 779a556..cb2fa7a 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/copy-security-group
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/copy-security-group
@@ -6,6 +6,7 @@ usage: copy-security-group [--source SOURCE] [--delete-only] NAME
 Copies SOURCE to NAME, after deleting any existing groups called NAME.
 If --delete-only is given, it only deletes existing groups called NAME.
 """
+# pylint: disable=invalid-name, import-error, broad-except
 
 import os
 import argparse
@@ -22,7 +23,7 @@ RULE_MEMBERS_IGNORE = ["id", "tags", "updated_at",
                        "project_id", "tenant_id", ]
 
 
-def main():
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='Copy security groups')
     parser.add_argument('name', metavar='NAME', help='name of security group')
     parser.add_argument('--source', default='default',
@@ -33,71 +34,63 @@ def main():
 
     # we get called from ExecStartPre of lxd units too (where
     # copy-security-group isn't required), just bail out if that's the case
-    if 'lxd' in args.name:
-        return
-
-    if os.environ.get('OS_IDENTITY_API_VERSION') == '3':
-        auth = v3.Password(auth_url=os.environ['OS_AUTH_URL'],
-                           username=os.environ['OS_USERNAME'],
-                           password=os.environ['OS_PASSWORD'],
-                           project_name=os.environ['OS_PROJECT_NAME'],
-                           user_domain_name=os.environ['OS_USER_DOMAIN_NAME'],
-                           project_domain_name=os.environ['OS_PROJECT_DOMAIN_NAME'])
-    else:
-        auth = v2.Password(
-            auth_url=os.environ['OS_AUTH_URL'],
-            username=os.environ['OS_USERNAME'],
-            password=os.environ['OS_PASSWORD'],
-            tenant_name=os.environ['OS_TENANT_NAME'])
-
-    sess = session.Session(auth=auth)
-    neutron = client.Client(session=sess,
-                            tenant_name=os.environ.get("OS_TENANT_NAME"),
-                            region_name=os.environ["OS_REGION_NAME"])
-
-    # Find the source group - crashes if it does not exists
-    source = [g for g in neutron.list_security_groups()
-              ['security_groups'] if g['name'] == args.source][0]
-
-    description = "copy {} of {} ({})".format(args.name, args.source,
-                                              source['description'])
-
-    # Delete any existing group with the same name
-    existing_groups = [g for g in
-                       neutron.list_security_groups()['security_groups']
-                       if g['name'] == args.name]
-    existing_ports = neutron.list_ports()['ports']
-    for target in existing_groups:
-        print("Deleting existing group", target)
-        for port in existing_ports:
-            if target['id'] in port['security_groups']:
-                print("Deleting port in group:", target['id'])
-                try:
-                    neutron.delete_port(port['id'])
-                except Exception as e:
-                    print("Could not delete port:", e)
-        neutron.delete_security_group(target['id'])
-
-    if not args.delete_only:
-        print("Creating", description)
-        target = neutron.create_security_group(
-            {'security_group': {'name': args.name,
-                                'description': description}}
-        )["security_group"]
-
-        for rule in target["security_group_rules"]:
-            neutron.delete_security_group_rule(rule["id"])
-
-        for rule in source["security_group_rules"]:
-            rule = {k: v for k, v in rule.items()
-                    if v is not None and
-                    k not in RULE_MEMBERS_IGNORE}
-
-            rule["security_group_id"] = target["id"]
-
-            print("Copying rule", rule)
-            neutron.create_security_group_rule({'security_group_rule': rule})
-
-
-if __name__ == '__main__':
-    main()
+    if 'lxd' not in args.name:
+        if os.environ.get('OS_IDENTITY_API_VERSION') == '3':
+            auth = v3.Password(auth_url=os.environ['OS_AUTH_URL'],
+                               username=os.environ['OS_USERNAME'],
+                               password=os.environ['OS_PASSWORD'],
+                               project_name=os.environ['OS_PROJECT_NAME'],
+                               user_domain_name=os.environ['OS_USER_DOMAIN_NAME'],
+                               project_domain_name=os.environ['OS_PROJECT_DOMAIN_NAME'])
+        else:
+            auth = v2.Password(
+                auth_url=os.environ['OS_AUTH_URL'],
+                username=os.environ['OS_USERNAME'],
+                password=os.environ['OS_PASSWORD'],
+                tenant_name=os.environ['OS_TENANT_NAME'])
+
+        sess = session.Session(auth=auth)
+        neutron = client.Client(session=sess,
+                                tenant_name=os.environ.get("OS_TENANT_NAME"),
+                                region_name=os.environ["OS_REGION_NAME"])
+
+        # Find the source group - crashes if it does not exists
+        source = [g for g in neutron.list_security_groups()
+                  ['security_groups'] if g['name'] == args.source][0]
+
+        description = f'copy {args.name} of {args.source} ({source["description"]})'
+        # Delete any existing group with the same name
+        existing_groups = [g for g in
+                           neutron.list_security_groups()['security_groups']
+                           if g['name'] == args.name]
+        existing_ports = neutron.list_ports()['ports']
+        for target in existing_groups:
+            print("Deleting existing group", target)
+            for port in existing_ports:
+                if target['id'] in port['security_groups']:
+                    print("Deleting port in group:", target['id'])
+                    try:
+                        neutron.delete_port(port['id'])
+                    except Exception as e:
+                        print("Could not delete port:", e)
+            neutron.delete_security_group(target['id'])
+
+        if not args.delete_only:
+            print("Creating", description)
+            target = neutron.create_security_group(
+                {'security_group': {'name': args.name,
+                                    'description': description}}
+            )["security_group"]
+
+            for rule in target["security_group_rules"]:
+                neutron.delete_security_group_rule(rule["id"])
+
+            for rule in source["security_group_rules"]:
+                rule = {k: v for k, v in rule.items()
+                        if v is not None and
+                        k not in RULE_MEMBERS_IGNORE}
+
+                rule["security_group_id"] = target["id"]
+
+                print("Copying rule", rule)
+                neutron.create_security_group_rule({'security_group_rule': rule})
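
Moving the body under the __main__ guard and wrapping it in "if 'lxd' not in args.name:" adds a level of indentation to some sixty lines; an early exit would keep the body flat. A sketch of that alternative (not what this branch does):

    import argparse
    import sys

    parser = argparse.ArgumentParser(description='Copy security groups')
    parser.add_argument('name', metavar='NAME', help='name of security group')
    args = parser.parse_args()

    if 'lxd' in args.name:
        sys.exit(0)  # lxd units don't need a security group copy
    # ... the copy logic follows here at top-level indentation ...
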
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-nova-image-new-release b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-nova-image-new-release
index bc51ff1..8632dcf 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-nova-image-new-release
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-nova-image-new-release
@@ -1,6 +1,8 @@
 #!/bin/bash
 # create an autopkgtest nova image for a new release, based on a generic image
 # Author: Martin Pitt <martin.pitt@xxxxxxxxxx>
+# shellcheck disable=SC2154
+# shellcheck disable=SC2034
 set -eu
 RELEASE="${1:-}"
 ARCH="${2:-}"
@@ -48,9 +50,9 @@ else
 fi
 
 # unbreak my server option :-(
-userdata=`mktemp`
-trap "rm $userdata" EXIT TERM INT QUIT PIPE
-cat <<EOF > $userdata
+userdata=$(mktemp)
+trap 'rm ${userdata}' EXIT TERM INT QUIT PIPE
+cat <<EOF > "${userdata}"
 #cloud-config
 
 manage_etc_hosts: true
@@ -67,13 +69,13 @@ EOF
 
 # create new instance
 INSTNAME="${BASEIMG}-adt-prepare"
-eval "$(openstack network show -f shell ${NET_NAME})"
+eval "$(openstack network show -f shell "${NET_NAME}")"
 
-NET_ID=${id}
+NET_ID="${id}"
 
 retries=20
 while true; do
-    eval "$(openstack server create -f shell --flavor autopkgtest --image $BASEIMG --user-data $userdata --key-name $KEYNAME --wait $INSTNAME --nic net-id=${NET_ID})"
+    eval "$(openstack server create -f shell --flavor m1.small --image "${BASEIMG}" --user-data "${userdata}" --key-name "${KEYNAME}" --wait "${INSTNAME}" --nic net-id="${NET_ID}")"
     if openstack server show "${id}" >/dev/null 2>/dev/null; then
         break
     fi
@@ -90,27 +96,27 @@ done
 
 SRVID="${id}"
 
-trap "openstack server delete ${SRVID}" EXIT TERM INT QUIT PIPE
+trap 'openstack server delete ${SRVID}' EXIT TERM INT QUIT PIPE
 
 # determine IP address
-eval "$(openstack server show -f shell ${SRVID})"
-ipaddr=$(echo ${addresses} | awk 'BEGIN { FS="=" } { print $2 }')
+eval "$(openstack server show -f shell "${SRVID}")"
+ipaddr=$(echo "${addresses}" | awk 'BEGIN { FS="=" } { print $2 }')
 
 SSH_CMD="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ipaddr"
 echo "Waiting for ssh (may cause some error messages)..."
 timeout 300 sh -c "while ! $SSH_CMD true; do sleep 5; done"
 
 echo "Waiting until cloud-init is done..."
-timeout 25m $SSH_CMD 'while [ ! -e /var/lib/cloud/instance/boot-finished ]; do sleep 1; done'
+timeout 25m ${SSH_CMD} 'while [ ! -e /var/lib/cloud/instance/boot-finished ]; do sleep 1; done'
 
 echo "Running setup script..."
-cat "${SETUP_TESTBED}" | $SSH_CMD "sudo env MIRROR='${MIRROR:-}' RELEASE='$RELEASE' sh -"
+${SSH_CMD} "sudo env MIRROR='${MIRROR:-}' RELEASE='$RELEASE' sh -" < "${SETUP_TESTBED}"
 
 echo "Running Canonical setup script..."
-CANONICAL_SCRIPT=$(dirname $(dirname $(readlink -f $0)))/worker-config-production/setup-canonical.sh
-cat "$CANONICAL_SCRIPT" | $SSH_CMD "sudo env MIRROR='${MIRROR:-}' RELEASE='$RELEASE' sh -"
+CANONICAL_SCRIPT="$(dirname "$(dirname "$(readlink -f "${0}")")")"/worker-config-production/setup-canonical.sh
+${SSH_CMD} "sudo env MIRROR='${MIRROR:-}' RELEASE='$RELEASE' sh -" < "${CANONICAL_SCRIPT}"
 
-arch=$($SSH_CMD dpkg --print-architecture)
+arch=$(${SSH_CMD} dpkg --print-architecture)
 
 echo "Check that the upgraded image boots..."
 while true; do
@@ -138,10 +144,10 @@ $SSH_CMD sudo journalctl --rotate --vacuum-time=12h || true
 
 echo "Powering off to get a clean file system..."
 $SSH_CMD sudo poweroff || true
-eval "$(openstack server show -f shell ${SRVID})"
-while [ ${os_ext_sts_vm_state} != "stopped" ]; do
+eval "$(openstack server show -f shell "${SRVID}")"
+while [ "${os_ext_sts_vm_state}" != "stopped" ]; do
         sleep 1
-        eval "$(openstack server show -f shell ${SRVID})"
+        eval "$(openstack server show -f shell "${SRVID}")"
 done
 
 echo "Creating image $IMAGE_NAME ..."
@@ -155,8 +161,8 @@ while true; do
     while [ $inner_retries -gt 0 ]; do
         # server image create often loses its connection but it's actually
         # working - if the image is uploading, wait a bit for it to finish
-        eval $(openstack image show -f shell --prefix=image_ "${IMAGE_NAME}")
-        eval $(openstack server show -f shell --prefix=server_ "${SRVID}")
+        eval "$(openstack image show -f shell --prefix=image_ "${IMAGE_NAME}")"
+        eval "$(openstack server show -f shell --prefix=server_ "${SRVID}")"
         if [ "${server_os_ext_sts_task_state}" = "image_uploading" ] ||
            [ "${image_status}" = "saving" ]; then
             echo "image ${IMAGE_NAME} is uploading, waiting..." >&2
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-nova-image-with-proposed-package b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-nova-image-with-proposed-package
index d20bbc2..60d1bdd 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-nova-image-with-proposed-package
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-nova-image-with-proposed-package
@@ -1,28 +1,31 @@
 #!/usr/bin/python3
-# Create a nova image from an existing one with all installed binaries from a
-# given source package upgraded to -proposed.
-# Author: Martin Pitt <martin.pitt@xxxxxxxxxx>
-#
-# Usage: create-nova-image-with-proposed-package <image RE> <proposed source package name>
-#   <image RE> is a regexp that matches a *substring* of the image name; of all
-#   available active matching ones the latest one (by creation date) is taken
-#   as a base image.
-#
-#   <source> is the source package name to test. All of its binaries which have
-#   a newer version available (usually from enabling -proposed) will be
-#   updated.
-#
-# This creates a new image proposed-<source>/<original image base name>.
+'''
+Create a nova image from an existing one with all installed binaries from a
+given source package upgraded to -proposed.
+Author: Martin Pitt <martin.pitt@xxxxxxxxxx>
+
+Usage: create-nova-image-with-proposed-package <image RE> <proposed source package name>
+  <image RE> is a regexp that matches a *substring* of the image name; of all
+  available active matching ones the latest one (by creation date) is taken
+  as a base image.
+
+  <source> is the source package name to test. All of its binaries which have
+  a newer version available (usually from enabling -proposed) will be
+  updated.
+
+This creates a new image proposed-<source>/<original image base name>.
+'''
+# pylint: disable=anomalous-backslash-in-string, invalid-name, import-error, consider-using-f-string, consider-using-with, bad-option-value
 
 import sys
 import os
-import keystoneauth1.loading
-import glanceclient
-from glanceclient.common import utils
 import re
 import tempfile
 import subprocess
 
+import keystoneauth1.loading
+import glanceclient
+from glanceclient.common import utils
 
 def get_glance():
     '''Return glance client object'''
@@ -39,20 +42,26 @@ def get_glance():
 def find_latest_image(img_re):
     '''find latest image that matches given RE'''
 
-    latest = None
-    for img in glance.images.list():
-        if img.status == 'active' and image_re.search(img.name):
-            if latest is None or img.created_at > latest.created_at:
-                latest = img
-    if not latest:
-        sys.stderr.write('No image matched "%s"\n' % sys.argv[1])
+    latest_image = None
+    for image in glance.images.list():
+        if image.status == 'active' and img_re.search(image.name):
+            if latest_image is None or image.created_at > latest_image.created_at:
+                latest_image = image
+    if not latest_image:
+        sys.stderr.write(f'No image matched "{sys.argv[1]}"\n')
         sys.exit(1)
-    return latest
-
-
-def setup_image(image_path, source):
-    # get a chroot shell into the image
-    img_shell = subprocess.Popen(['sudo', '-n', 'mount-image-callback', '--system-mounts', '--system-resolvconf',
+    return latest_image
+
+
+def setup_image(image_path, binary_source):
+    '''
+    get a chroot shell into the image
+    '''
+    img_shell = subprocess.Popen(['sudo',
+                                  '-n',
+                                  'mount-image-callback',
+                                  '--system-mounts',
+                                  '--system-resolvconf',
                                   image_path, 'chroot', '_MOUNTPOINT_', '/bin/sh'],
                                  stdin=subprocess.PIPE)
 
@@ -79,34 +88,33 @@ DEBIAN_FRONTEND=noninteractive apt-get install -y $SRCS
 echo '* Cleaning up'
 apt-get clean
 rm -f /etc/machine-id /usr/sbin/policy-rc.d
-   ''' % {'src': source}).encode())
+   ''' % {'src': binary_source}).encode())
 
     img_shell.stdin.close()
     img_shell.wait()
 
 
-#
-# main
-#
-
-if len(sys.argv) != 3:
-    sys.stderr.write('Usage: %s <image RE> <proposed source package name>\n' % sys.argv[0])
-    sys.exit(1)
-
-image_re = re.compile(sys.argv[1])
-source = sys.argv[2]
-glance = get_glance()
-latest = find_latest_image(image_re)
-
-print('* Downloading image %s (UUID: %s)...' % (latest.name, latest.id))
-workdir = tempfile.TemporaryDirectory(prefix='make-image-with-proposed-package.')
-img = os.path.join(workdir.name, 'image')
-utils.save_image(glance.images.data(latest.id), img)
-
-setup_image(img, source)
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        sys.stderr.write('Usage: %s <image RE> <proposed source package name>\n' % sys.argv[0])
+        sys.exit(1)
 
-newimg_name = 'proposed-%s/%s' % (source, os.path.basename(latest.name))
-newimg = glance.images.create(name=newimg_name, disk_format=latest.disk_format, container_format=latest.container_format)
-print('* Uploading new image %s (UUID: %s)...' % (newimg.name, newimg.id))
-with open(img, 'rb') as f:
-    glance.images.upload(newimg.id, f)
+    image_re = re.compile(sys.argv[1])
+    source = sys.argv[2]
+    glance = get_glance()
+    latest = find_latest_image(image_re)
+
+    print('* Downloading image %s (UUID: %s)...' % (latest.name, latest.id))
+    workdir = tempfile.TemporaryDirectory(prefix='make-image-with-proposed-package.')
+    img = os.path.join(workdir.name, 'image')
+    utils.save_image(glance.images.data(latest.id), img)
+
+    setup_image(img, source)
+
+    newimg_name = 'proposed-%s/%s' % (source, os.path.basename(latest.name))
+    newimg = glance.images.create(name=newimg_name,
+                                  disk_format=latest.disk_format,
+                                  container_format=latest.container_format)
+    print('* Uploading new image %s (UUID: %s)...' % (newimg.name, newimg.id))
+    with open(img, 'rb') as f:
+        glance.images.upload(newimg.id, f)
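
setup_image() drives the chroot through a shell spawned with Popen and a stdin pipe. Reduced to its shape (using plain /bin/sh here rather than mount-image-callback, which needs root and a real image):

    import subprocess

    shell = subprocess.Popen(["/bin/sh"], stdin=subprocess.PIPE)
    shell.stdin.write(b"echo '* hello from the piped script'\n")
    shell.stdin.close()
    shell.wait()
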
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-test-instances b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-test-instances
old mode 100755
new mode 100644
index f0b7125..c057ce8
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-test-instances
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-test-instances
@@ -13,5 +13,5 @@ IMAGE=$(openstack image list | grep "adt/ubuntu-$DEVEL-$ARCH" | cut -d' ' -f2)
 NET_ID=$(openstack network list | grep 'net_prod-proposed-migration' | cut -d' ' -f2)
 
 for i in $(seq 1 10); do
-    openstack server create --image $IMAGE --flavor cpu4-ram8-disk50 --nic net-id=$NET_ID -- "creation-test-$i"
+    openstack server create --image "${IMAGE}" --flavor cpu4-ram8-disk50 --nic net-id="${NET_ID}" -- "creation-test-${i}"
 done
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/ensure-keypair b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/ensure-keypair
index be664d6..31b7d32 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/ensure-keypair
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/ensure-keypair
@@ -2,12 +2,14 @@
 
 set -eu
 
+# shellcheck disable=SC2034
 IFS="[- ]" read -r RELEASE REGION ARCH bootstrap <<< "$@"
 
-if [ -e ~/cloudrcs/${REGION}-${ARCH}.rc ]; then
-        . ~/cloudrcs/${REGION}-${ARCH}.rc
+# shellcheck disable=SC1090
+if [ -e ~/cloudrcs/"${REGION}"-"${ARCH}".rc ]; then
+        . ~/cloudrcs/"${REGION}"-"${ARCH}".rc
 else
-        . ~/cloudrcs/${REGION}.rc
+        . ~/cloudrcs/"${REGION}".rc
 fi
 
 if ! [ -e "${HOME}/.ssh/id_rsa" ]; then
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/exec-in-region b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/exec-in-region
index 0261108..2e78e83 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/exec-in-region
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/exec-in-region
@@ -1,5 +1,6 @@
 #!/bin/sh
 # usage: exec-in-region <region name> <command> <argument>...
+# shellcheck disable=SC1090
 
 set -e
 
@@ -25,7 +26,7 @@ export REGION
 if [ "${REGION#lxd-}" != "$REGION" ]; then
 	LXD_ARCH=${REGION#*-}; LXD_ARCH=${LXD_ARCH%%-*}
 else
-	. ${HOME}/cloudrcs/${REGION}.rc
+	. "${HOME}"/cloudrcs/"${REGION}".rc
 fi
 
 exec "$@"
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/filter-amqp b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/filter-amqp
index f6dda5a..cc7255f 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/filter-amqp
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/filter-amqp
@@ -1,15 +1,20 @@
 #!/usr/bin/python3
-# Filter out AMQP requests that match a given regex
+'''
+Filter out AMQP requests that match a given regex
+'''
+# pylint: disable=invalid-name, import-error, deprecated-module
 
 import logging
 import optparse
-import sys
 import re
 import urllib.parse
 import amqplib.client_0_8 as amqp
 
 
 def filter_amqp(options, host, queue_name, regex):
+    '''
+    Checks amqp queue for strings with a given regex
+    '''
     url_parts = urllib.parse.urlsplit(host, allow_fragments=False)
     filter_re = re.compile(regex.encode('UTF-8'))
     amqp_con = amqp.Connection(url_parts.hostname, userid=url_parts.username,
@@ -33,7 +38,7 @@ def filter_amqp(options, host, queue_name, regex):
                 ch.basic_ack(r.delivery_tag)
 
 
-def main():
+if __name__ == "__main__":
     parser = optparse.OptionParser(
         usage="usage: %prog [options] amqp://user:pass@host queue_name regex")
     parser.add_option(
@@ -52,6 +57,3 @@ def main():
         parser.error("Need to specify host, queue and regex")
 
     filter_amqp(opts, args[0], args[1], args[2])
-
-if __name__ == '__main__':
-    main()
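
As in copy-security-group above, the main() wrapper is dissolved into the __main__ guard. That satisfies pylint without a disable, but the conventional pattern keeps the function so the module remains importable from tests:

    def main():
        '''Parse options and run the filter.'''
        # option parsing and the filter_amqp() call would live here
        ...

    if __name__ == '__main__':
        main()
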
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/filter-amqp-dupes-upstream b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/filter-amqp-dupes-upstream
index 965fa10..f90293d 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/filter-amqp-dupes-upstream
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/filter-amqp-dupes-upstream
@@ -1,18 +1,19 @@
 #!/usr/bin/python3
-# Filter out all but the latest request for a given upstream PR
+'''
+Filter out all but the latest request for a given upstream PR
+'''
+# pylint: disable=invalid-name, deprecated-module, import-error, no-member, too-many-locals, logging-not-lazy
 
-import dateutil.parser
-import distro_info
 import json
 import logging
 import optparse
 import os
-import sys
-import re
 import urllib.parse
+from collections import defaultdict
+import distro_info
+import dateutil.parser
 import amqplib.client_0_8 as amqp
 
-from collections import defaultdict
 
 UDI = distro_info.UbuntuDistroInfo()
 ALL_UBUNTU_RELEASES = UDI.all
@@ -21,12 +22,14 @@ SUPPORTED_UBUNTU_RELEASES = sorted(
 )
 
 def filter_amqp(options, host):
+    '''Filters the contents of the amqp queue'''
     url_parts = urllib.parse.urlsplit(host, allow_fragments=False)
     amqp_con = amqp.Connection(url_parts.hostname, userid=url_parts.username,
                                password=url_parts.password)
     dry_run = '[dry-run] ' if options.dry_run else ''
 
-    queues = (f'debci-upstream-{release}-{arch}' for release in SUPPORTED_UBUNTU_RELEASES for arch in ('amd64', 'arm64', 'armhf', 'i386', 'ppc64el', 's390x'))
+    queues = (f'debci-upstream-{release}-{arch}' for release in SUPPORTED_UBUNTU_RELEASES \
+              for arch in ('amd64', 'arm64', 'armhf', 'i386', 'ppc64el', 's390x'))
     for queue_name in queues:
         ch = amqp_con.channel()
         logging.debug('Looking at %s', queue_name)
@@ -38,7 +41,7 @@ def filter_amqp(options, host):
                 (code, _, _, _) = e.args
                 if code != 404:
                     raise
-                logging.debug(f'No such queue {queue_name}')
+                logging.debug('No such queue %s', queue_name)
                 break
             if r is None:
                 break
@@ -49,22 +52,25 @@ def filter_amqp(options, host):
             (pkg, params) = body.split(' ', 1)
             params_j = json.loads(params)
             submit_time = dateutil.parser.parse(params_j['submit-time'])
-            pr = [val.split('=', 1)[1] for val in params_j['env'] if val.startswith('UPSTREAM_PULL_REQUEST')][0]
+            pr = [val.split('=', 1)[1] for val in params_j['env'] \
+                  if val.startswith('UPSTREAM_PULL_REQUEST')][0]
             try:
                 (delivery_tag, old_submit_time) = seen[pkg][pr]
                 if old_submit_time <= submit_time:
-                    logging.info(f'{dry_run}We have seen PR {pr} in {queue_name} before: acking the previous request')
+                    logging.info('%sWe have seen PR %s ' + \
+                                 'in %s before: acking the previous request',
+                                 dry_run, pr, queue_name)
                     if not options.dry_run:
                         ch.basic_ack(delivery_tag)  # delivery tag, the old one NOT r.delivery_tag!
                 del seen[pkg][pr]
             except KeyError:
                 pass
             finally:
-                logging.debug(f'Recording {pkg}/{pr} for {queue_name}')
+                logging.debug('Recording %s/%s for %s', pkg, pr, queue_name)
                 seen[pkg][pr] = (r.delivery_tag, submit_time)
 
 
-def main():
+if __name__ == "__main__":
     parser = optparse.OptionParser(
         usage="usage: %prog [options] amqp://user:pass@host queue_name regex")
     parser.add_option(
@@ -81,9 +87,6 @@ def main():
 
     user = os.environ['RABBIT_USER']
     password = os.environ['RABBIT_PASSWORD']
-    host = os.environ['RABBIT_HOST']
-    uri = f'amqp://{user}:{password}@{host}'
+    hostname = os.environ['RABBIT_HOST']
+    uri = f'amqp://{user}:{password}@{hostname}'
     filter_amqp(opts, uri)
-
-if __name__ == '__main__':
-    main()
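
The dedup bookkeeping above keeps, per package and PR, only the newest request and acks the superseded one. Condensed into a sketch (names hypothetical):

    from collections import defaultdict

    seen = defaultdict(dict)  # pkg -> pr -> (delivery_tag, submit_time)

    def consider(pkg, pr, tag, submitted, ack):
        '''Ack the older duplicate of (pkg, pr), then record the newest.'''
        if pr in seen[pkg]:
            old_tag, old_time = seen[pkg][pr]
            if old_time <= submitted:
                ack(old_tag)  # ack the previous request, not the current one
        seen[pkg][pr] = (tag, submitted)
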
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/metrics b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/metrics
index e7c552f..403b3fe 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/metrics
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/metrics
@@ -1,12 +1,16 @@
 #!/usr/bin/python3
-
-from gi.repository import GLib, Gio
-from influxdb import InfluxDBClient
+'''
+Writes metrics to influxdb database
+'''
+# pylint: disable=fixme
 
 import json
 import os
 import subprocess
 
+from gi.repository import GLib, Gio
+from influxdb import InfluxDBClient
+
 SYSTEM_BUS = Gio.bus_get_sync(Gio.BusType.SYSTEM)
 
 INFLUXDB_CONTEXT = os.environ["INFLUXDB_CONTEXT"]
@@ -18,10 +22,13 @@ INFLUXDB_USERNAME = os.environ["INFLUXDB_USERNAME"]
 
 
 def make_submission(counts, measurement):
+    '''
+    makes submission request based on containers and units
+    '''
     out = []
     for arch in counts:
         (active, error) = counts[arch]
-        m = {
+        measure = {
             "measurement": measurement,
             "fields": {"count": active},
             "tags": {
@@ -30,8 +37,8 @@ def make_submission(counts, measurement):
                 "instance": INFLUXDB_CONTEXT,
             },
         }
-        out.append(m)
-        m = {
+        out.append(measure)
+        measure = {
             "measurement": measurement,
             "fields": {"count": error},
             "tags": {
@@ -40,11 +47,14 @@ def make_submission(counts, measurement):
                 "instance": INFLUXDB_CONTEXT,
             },
         }
-        out.append(m)
+        out.append(measure)
     return out
 
 
 def get_units():
+    '''
+    gets units in autopkgtest-cloud env - for unit definition, see juju docs
+    '''
     counts = {}
 
     (units,) = SYSTEM_BUS.call_sync(
@@ -84,10 +94,10 @@ def get_units():
             continue
 
         try:
-            (region, arch, n) = name_cloud.split("-", -1)
+            (_, arch, _) = name_cloud.split("-", -1)
         except ValueError:
             # autopkgtest@lcy01-1.service
-            (region, n) = name_cloud.split("-", -1)
+            (_, _) = name_cloud.split("-", -1)
             arch = "amd64"
         (active, error) = counts.setdefault(arch, (0, 0))
 
@@ -101,17 +111,20 @@ def get_units():
 
 
 def get_remotes():
+    '''
+    Gets list of remote containers in autopkgtest-cloud
+    '''
     cluster_counts = {}
     noncluster_counts = {}
     out = subprocess.check_output(
         ["lxc", "remote", "list", "--format=json"], universal_newlines=True
     )
 
-    for r in json.loads(out):
-        if not r.startswith("lxd"):
+    for req in json.loads(out):
+        if not req.startswith("lxd"):
             continue
 
-        (_, arch, ip) = r.split("-", 3)
+        (_, arch, _) = req.split("-", 3)
         (cluster_active, cluster_error) = cluster_counts.setdefault(
             arch, (0, 0)
         )
@@ -120,12 +133,12 @@ def get_remotes():
         )
 
         try:
-            cl = subprocess.check_output(
-                ["lxc", "cluster", "list", f"{r}:", "--format=json"],
+            cluster_list = subprocess.check_output(
+                ["lxc", "cluster", "list", f"{req}:", "--format=json"],
                 stderr=subprocess.DEVNULL,
                 universal_newlines=True,
             )
-            for node in json.loads(cl):
+            for node in json.loads(cluster_list):
                 if node["status"] == "Online":
                     cluster_active += 1
                 else:
@@ -134,7 +147,7 @@ def get_remotes():
         except subprocess.CalledProcessError:  # it's not a cluster node
             try:
                 subprocess.check_call(
-                    ["lxc", "list", f"{r}:"],
+                    ["lxc", "list", f"{req}:"],
                     stdout=subprocess.DEVNULL,
                     stderr=subprocess.DEVNULL,
                     timeout=30,
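
get_units() and get_remotes() both accumulate per-arch (active, error) pairs with setdefault. A toy version of that accumulation (sample data invented):

    counts = {}
    samples = [("amd64", "running"), ("amd64", "error"), ("arm64", "running")]
    for arch, state in samples:
        active, error = counts.setdefault(arch, (0, 0))
        if state == "error":
            counts[arch] = (active, error + 1)
        else:
            counts[arch] = (active + 1, error)
    # counts == {"amd64": (1, 1), "arm64": (1, 0)}
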
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/retry-github-test b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/retry-github-test
index e47ecc2..4afe672 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/retry-github-test
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/retry-github-test
@@ -1,5 +1,6 @@
 #!/usr/bin/python3
 '''Retry a GitHub PR test request to autopkgtest.ubuntu.com'''
+# pylint: disable=invalid-name
 
 import os
 import sys
@@ -13,7 +14,8 @@ p = argparse.ArgumentParser(description='Retry a GitHub PR test request to autop
 p.add_argument('pr_api_url',
                help='GitHub PR API URL (e. g. https://api.github.com/repos/JoeDev/coolproj/pulls/1')
 p.add_argument('test_url',
-               help='autopkgtest URL (https://autopkgtest.ubuntu.com/request.cgi?release=xenial&arch=i386&;...)')
+               help='autopkgtest URL (https://autopkgtest.ubuntu.com/' + \
+                    'request.cgi?release=xenial&arch=i386&...)')
 p.add_argument('secret_file', type=argparse.FileType('rb'),
                help='Path to the GitHub secret for this test web hook')
 args = p.parse_args()
@@ -35,6 +37,6 @@ try:
     with urllib.request.urlopen(req) as f:
         print(f.read().decode())
 except urllib.error.HTTPError as e:
-    sys.stderr.write('Request failed with code %i: %s' % (e.code, e.msg))
+    sys.stderr.write(f'Request failed with code {e.code}: {e.msg}')
     sys.stderr.write(e.fp.read().decode('UTF-8', 'replace'))
     sys.exit(1)
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/run-autopkgtest b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/run-autopkgtest
index a09ad42..ca19be2 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/run-autopkgtest
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/run-autopkgtest
@@ -1,7 +1,10 @@
 #!/usr/bin/python3
-# Request runs of autopkgtests for packages
-# Imported from lp:ubuntu-archive-scripts, lightly modified to not rely on a
-# britney config file, to be used for administration or testing.
+'''
+Request runs of autopkgtests for packages
+Imported from lp:ubuntu-archive-scripts, lightly modified to not rely on a
+britney config file, to be used for administration or testing.
+'''
+# pylint: disable=invalid-name, import-error
 
 from datetime import datetime
 import os
@@ -122,52 +125,52 @@ def parse_args():
     # verify syntax of triggers
     for t in args.trigger:
         try:
-            (src, ver) = t.split("/")
+            (_, _) = t.split("/")
         except ValueError:
             parser.error(
-                'Invalid trigger format "%s", must be "sourcepkg/version"' % t
+                f'Invalid trigger format "{t}", must be "sourcepkg/version"'
             )
 
     # verify syntax of PPAs
     for t in args.ppa:
         try:
-            (user, name) = t.split("/")
+            (_, _) = t.split("/")
         except ValueError:
             parser.error(
-                'Invalid ppa format "%s", must be "lpuser/ppaname"' % t
+                f'Invalid ppa format "{t}", must be "lpuser/ppaname"'
             )
 
     return args
 
 
 if __name__ == "__main__":
-    args = parse_args()
+    arguments = parse_args()
 
     context = ""
     params = {}
-    if args.bulk:
+    if arguments.bulk:
         context = "huge-"
-    if args.trigger:
-        params["triggers"] = args.trigger
-    if args.ppa:
-        params["ppas"] = args.ppa
+    if arguments.trigger:
+        params["triggers"] = arguments.trigger
+    if arguments.ppa:
+        params["ppas"] = arguments.ppa
         context = "ppa-"
-    if args.env:
-        params["env"] = args.env
-    if args.test_git:
-        params["test-git"] = args.test_git
+    if arguments.env:
+        params["env"] = arguments.env
+    if arguments.test_git:
+        params["test-git"] = arguments.test_git
         context = "upstream-"
-    elif args.build_git:
-        params["build-git"] = args.build_git
+    elif arguments.build_git:
+        params["build-git"] = arguments.build_git
         context = "upstream-"
-    if args.test_bzr:
-        params["test-bzr"] = args.test_bzr
+    if arguments.test_bzr:
+        params["test-bzr"] = arguments.test_bzr
         context = "upstream-"
-    if args.swiftuser:
-        params["swiftuser"] = args.swiftuser
-    if args.readable_by:
-        params["readable-by"] = args.readable_by
-    if args.all_proposed:
+    if arguments.swiftuser:
+        params["swiftuser"] = arguments.swiftuser
+    if arguments.readable_by:
+        params["readable-by"] = arguments.readable_by
+    if arguments.all_proposed:
         params["all-proposed"] = True
     try:
         params["requester"] = os.environ["SUDO_USER"]
@@ -180,37 +183,33 @@ if __name__ == "__main__":
 
     try:
         creds = urllib.parse.urlsplit(
-            "amqp://{user}:{password}@{host}".format(
-                user=os.environ["RABBIT_USER"],
-                password=os.environ["RABBIT_PASSWORD"],
-                host=os.environ["RABBIT_HOST"],
-            ),
+            f'amqp://{os.environ["RABBIT_USER"]}:' + \
+            f'{os.environ["RABBIT_PASSWORD"]}@' + \
+            f'{os.environ["RABBIT_HOST"]}',
             allow_fragments=False,
         )
     except KeyError:
-        with open(os.path.expanduser("~/rabbitmq.cred"), "r") as f:
+        with open(os.path.expanduser("~/rabbitmq.cred"), "r", encoding='utf-8') as f:
             env_dict = dict(
                 tuple(line.replace("\n", "").replace('"', "").split("="))
                 for line in f.readlines()
                 if not line.startswith("#")
             )
         creds = urllib.parse.urlsplit(
-            "amqp://{user}:{password}@{host}".format(
-                user=env_dict["RABBIT_USER"],
-                password=env_dict["RABBIT_PASSWORD"],
-                host=env_dict["RABBIT_HOST"],
-            ),
+            f'amqp://{env_dict["RABBIT_USER"]}:' + \
+            f'{env_dict["RABBIT_PASSWORD"]}@' + \
+            f'{env_dict["RABBIT_HOST"]}',
             allow_fragments=False,
         )
     assert creds.scheme == "amqp"
 
     with amqp.Connection(
-        creds.hostname, userid=creds.username, password=creds.password
+            creds.hostname, userid=creds.username, password=creds.password
     ) as amqp_con:
         with amqp_con.channel() as ch:
-            for arch in args.architecture:
-                queue = "debci-%s%s-%s" % (context, args.series, arch)
-                for pkg in args.package:
+            for arch in arguments.architecture:
+                queue = f'debci-{context}{arguments.series}-{arch}'
+                for pkg in arguments.package:
                     ch.basic_publish(
                         amqp.Message(
                             pkg + params, delivery_mode=2
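
The conversions in this file replace %-formatting with f-strings; for the queue name the two styles are equivalent:

    context, series, arch = "ppa-", "jammy", "amd64"  # example values

    assert "debci-%s%s-%s" % (context, series, arch) \
        == f"debci-{context}{series}-{arch}" \
        == "debci-ppa-jammy-amd64"
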
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/with-distributed-lock b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/with-distributed-lock
index dd4b1c8..60915e1 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/with-distributed-lock
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/with-distributed-lock
@@ -1,7 +1,7 @@
 #!/usr/bin/python3
-# Run a command while holding a distributed amqp lock
-#
+# pylint: disable=import-error, invalid-name, unused-argument
 """
+Run a command while holding a distributed amqp lock
 Usage: with-distributed-lock <lock name> <command> <argument>...
 
 Generates a RabbitMQ single active consumer queue named by the lock,
diff --git a/ci/lint_test b/ci/lint_test
index e52edc4..06a4133 100755
--- a/ci/lint_test
+++ b/ci/lint_test
@@ -1,5 +1,5 @@
 #!/usr/bin/python3
-# pylint: disable = invalid-name, broad-except, subprocess-run-check
+# pylint: disable = broad-except, unnecessary-dict-index-lookup, bad-option-value
 '''
 Script to lint the scripts in the autopkgtest-cloud repository in CI
 '''
@@ -8,33 +8,33 @@ import os
 import sys
 import logging
 import subprocess
+import argparse
 
 
-def check_for_extension(input_list, output_list, extension):
+def check_for_extension(input_list, output_list, file_extension):
     '''
-    Checks filepaths in a list for a given extension
+    Checks filepaths in a list for a given file_extension
     '''
-    for a in input_list:
-        if os.path.isfile(a):
-            # if str(a)[-3:] == extension:
-            if extension in str(a)[-6:]:
-                output_list.append(str(a))
+    for filepath in input_list:
+        if os.path.isfile(filepath):
+            if file_extension in str(filepath)[-6:]:
+                output_list.append(str(filepath))
     return output_list
 
 
-def check_for_shebang(input_list, output_list, shebang):
+def check_for_shebang(input_list, output_list, shebang_for_check):
     '''
-    Checks filepaths in a given list for a given shebang
+    Checks filepaths in a given list for a given shebang_for_check
     '''
-    for b in input_list:
-        if os.path.isfile(b):
+    for filepath in input_list:
+        if os.path.isfile(filepath):
             try:
-                with open(b, 'r', encoding='utf-8') as myfile:
+                with open(filepath, 'r', encoding='utf-8') as myfile:
                     file = myfile.read()
                     into_list = file.splitlines()
                     if len(into_list) > 1:
-                        if into_list[0] == shebang:
-                            output_list.append(str(b))
+                        if into_list[0] == shebang_for_check:
+                            output_list.append(str(filepath))
             except Exception as _:
                 pass
     return output_list
@@ -44,42 +44,56 @@ def remove_list_from_list(input_list, remove_list):
     '''
     Removes elements from remove_list from input_list
     '''
-    for ff in input_list:
-        if os.path.isfile(ff):
-            if str(ff) in remove_list:
-                input_list.remove(ff)
+    for list_elem in list(input_list):
+        if os.path.isfile(list_elem):
+            if str(list_elem) in remove_list:
+                input_list.remove(list_elem)
     return input_list
 
 
 def run_lint_command(files_to_lint, lint_command, arguments=None):
     '''
-    Runs a given lint command over a list of filepaths and stores output
+    Runs given lint commands over a list of filepaths and stores output
     and exit code
     '''
-    exit_codes = 0
-    lint_output = ""
-    # check lint command exists
-    for f in files_to_lint:
-        if arguments is None:
-            cmd = [lint_command, f]
-            result = subprocess.run(cmd, stdout=subprocess.PIPE)
-        else:
-            cmd = [lint_command]
-            for arg in arguments.split(" "):
-                cmd.append(arg)
-            cmd.append(f)
-            result = subprocess.run(cmd, stdout=subprocess.PIPE)
-        lint_output += result.stdout.decode("utf-8") + "\n"
-        exit_codes += result.returncode
-    return lint_output, exit_codes
+    exit_codes = []
+    lint_output = []
+    lint_success = True
+    check_for_cmd = subprocess.run(["which", lint_command], stdout=subprocess.PIPE, check=False)
+    if check_for_cmd.returncode != 0:
+        logger.error("%s not present on system - please amend before using this script.",
+                     lint_command)
+        sys.exit(1)
+    for file in files_to_lint:
+        if ".git" not in file:
+            if arguments is None:
+                cmd = [lint_command, file]
+                result = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
+            else:
+                cmd = [lint_command]
+                for arg in arguments.split(" "):
+                    cmd.append(arg)
+                cmd.append(file)
+                result = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
+            lint_output.append(result.stdout.decode("utf-8") + "\n")
+            exit_codes.append(result.returncode)
+            if result.returncode != 0:
+                lint_success = False
+    return lint_output, exit_codes, lint_success
 
 
-if __name__=="__main__":
+if __name__ == "__main__":
+    # pylint: disable=invalid-name
+    parser = argparse.ArgumentParser(description="Args for lint test")
+    parser.add_argument('-v',
+                        '--verbose',
+                        help="Verbose output from lint test (y/n)",
+                        action='store_true')
+    args = parser.parse_args()
     logging.basicConfig(level=logging.INFO)
     logger = logging.getLogger('autopkgtest-cloud-linter')
 
-    start_dir = "../"
-    repo_dir = pathlib.Path(start_dir)
+    repo_dir = pathlib.Path("../")
     repo_dir.rglob("*")
 
     final_list_of_python_files = []
@@ -90,25 +104,28 @@ if __name__=="__main__":
             "files": [],
             "extensions": [".py"],
             "shebangs": ["#!/usr/bin/python3"],
-            "args": None,
-            "output": "",
-            "code": 0
+            "args": "--disable=E0012",
+            "output": [],
+            "code": [],
+            "success": False
         },
         "shellcheck": {
             "files": [],
             "extensions": [".sh", ".bash"],
             "shebangs": ["#!/bin/bash", "#!/bin/sh"],
             "args": None,
-            "output": "",
-            "code": 0
+            "output": [],
+            "code": [],
+            "success": False
         },
         'yamllint': {
             "files": ["../"],
             "extensions": None,
             "shebangs": None,
             "args": "--no-warnings",
-            "output": "",
-            "code": 0
+            "output": [],
+            "code": [],
+            "success": False
         }
     }
 
@@ -122,19 +139,23 @@ if __name__=="__main__":
                 data[key]["files"] = check_for_shebang(all_files, data[key]["files"], shebang)
                 all_files = remove_list_from_list(all_files, data[key]["files"])
         data[key]["output"], \
-        data[key]["code"] = run_lint_command(data[key]["files"], key, data[key]["args"])
-    ecodesum = 0
-    for _, oec in data.items():
-        ecodesum += oec["code"]
-    if ecodesum > 0:
+        data[key]["code"], \
+        data[key]["success"] = run_lint_command(data[key]["files"], key, data[key]["args"])
+
+    exit_code_sum = 0
+    for _, oec_s in data.items():
+        for e_c in oec_s["code"]:
+            exit_code_sum += e_c
+    if exit_code_sum > 0:
         for key, item in data.items():
-            if item["code"] > 0:
-                # logger.info("%s output: \n%s", key, item["output"])
+            if not item["success"]:
                 logger.info("%s failed!", key)
-        # sys.exit(1)
-        # temporary exit code, will be set back to 1 when python and bash scripts have been linted
-        # right now we are just checking yaml files
-        if key == "yamllint" and item["code"] != 0:
-            sys.exit(1)
-        sys.exit(0)
-    sys.exit(0)
\ No newline at end of file
+                if args.verbose:
+                    for i in range(len(item["code"])):
+                        if item["code"][i] != 0:
+                            logger.info("%s", item["output"][i])
+            else:
+                logger.info("%s passed!", key)
+        sys.exit(1)
+    logger.info("All the following linting tests passed: %s\n", list(data.keys()))
+    sys.exit(0)
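
run_lint_command() now returns one output and one exit code per file plus an overall success flag, so the caller can print only the failing outputs. The calling pattern reduces to roughly this (run_many is an illustrative stand-in; assumes yamllint is installed):

    import subprocess

    def run_many(cmd, files):
        '''Run one linter over each file, keeping every output and exit code.'''
        outputs, codes = [], []
        for path in files:
            result = subprocess.run([cmd, path], stdout=subprocess.PIPE,
                                    check=False)
            outputs.append(result.stdout.decode("utf-8"))
            codes.append(result.returncode)
        return outputs, codes, all(code == 0 for code in codes)

    outputs, codes, ok = run_many("yamllint", ["../"])
    print("yamllint passed!" if ok else "yamllint failed!")
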
diff --git a/docs/conf.py b/docs/conf.py
index a46ffdc..b4b923c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,7 +1,7 @@
 '''
 Configuration file for the Sphinx documentation builder.
 '''
-#pylint: disable=redefined-builtin
+#pylint: disable=redefined-builtin, invalid-name
 #
 # This file only contains a selection of the most common options. For a full
 # list see the documentation:
diff --git a/lxc-slave-admin/cmd b/lxc-slave-admin/cmd
index ff991ff..0700964 100755
--- a/lxc-slave-admin/cmd
+++ b/lxc-slave-admin/cmd
@@ -1,6 +1,6 @@
 #!/bin/sh
 set -e
-MYDIR=`dirname $0`
+MYDIR=$(dirname "${0}")
 
 if [ -z "$1" ]; then
     echo "Usage: $0 <hosts> <commands or .commands file>" >&2
@@ -8,11 +8,11 @@ if [ -z "$1" ]; then
 fi
 
 if [ "$1" = "all" ]; then
-    for f in $MYDIR/*.hosts; do
+    for f in "${MYDIR}"/*.hosts; do
         hosts="$hosts -h $f";
     done
 else
-    if [ -e ${1} ]; then
+    if [ -e "${1}" ]; then
         hosts="-h ${1}"
     elif [ -e "${1}.hosts" ]; then
         hosts="-h ${1}.hosts"
@@ -29,8 +29,8 @@ if [ "${1%.commands}" != "$1" ]; then
         exit 1
     fi
     # command file
-    cat "$1" | parallel-ssh -x "-F $MYDIR/ssh_config" -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -OIdentitiesOnly=yes $hosts -p8 -t 0 -i -I
+    parallel-ssh -x "-F ${MYDIR}/ssh_config" -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -OIdentitiesOnly=yes ${hosts} -p8 -t 0 -i -I < "${1}"
 else
     # command
-    parallel-ssh -x "-F $MYDIR/ssh_config" -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -OIdentitiesOnly=yes $hosts -p8 -t 0 -i -- "$@"
+    parallel-ssh -x "-F ${MYDIR}/ssh_config" -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -OIdentitiesOnly=yes ${hosts} -p8 -t 0 -i -- "$@"
 fi
diff --git a/mojo/make-lxd-secgroup b/mojo/make-lxd-secgroup
index 634e598..1c1d961 100755
--- a/mojo/make-lxd-secgroup
+++ b/mojo/make-lxd-secgroup
@@ -1,5 +1,5 @@
 #!/bin/sh
-
+# shellcheck disable=SC1090
 set -eu
 
 # there's apparently no way to get this dynamically
@@ -24,4 +24,4 @@ done
 
 if [ -n "${ROUTER_IP:-}" ]; then
         nova secgroup-add-rule lxd tcp 8443 8443 "${ROUTER_IP}/32" 2>/dev/null || true  # perhaps it already existed
-fi
+fi
\ No newline at end of file
diff --git a/mojo/postdeploy b/mojo/postdeploy
index 0f857ae..4b0f88f 100755
--- a/mojo/postdeploy
+++ b/mojo/postdeploy
@@ -11,5 +11,6 @@ if [ "${MOJO_STAGE_NAME}" == "staging" ]; then
 fi
 
 echo "Setting up the floating IP address of the front end..."
-$(dirname $0)/add-floating-ip haproxy
-$(dirname $0)/add-floating-ip rabbitmq-server
+directory=$(dirname "${0}")
+"${directory}"/add-floating-ip haproxy
+"${directory}"/add-floating-ip rabbitmq-server