canonical-ubuntu-qa team mailing list archive
Message #00562
[Merge] ~andersson123/autopkgtest-cloud:lint_autopkgtest-cloud-worker into autopkgtest-cloud:master
Tim Andersson has proposed merging ~andersson123/autopkgtest-cloud:lint_autopkgtest-cloud-worker into autopkgtest-cloud:master.
Commit message:
Lint only autopkgtest-cloud-worker for easier testing
Requested reviews:
Canonical's Ubuntu QA (canonical-ubuntu-qa)
For more details, see:
https://code.launchpad.net/~andersson123/autopkgtest-cloud/+git/autopkgtest-cloud/+merge/444163
Lint only autopkgtest-cloud-worker for easier testing
--
Your team Canonical's Ubuntu QA is requested to review the proposed merge of ~andersson123/autopkgtest-cloud:lint_autopkgtest-cloud-worker into autopkgtest-cloud:master.
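
For context on the linting change itself: the reworked ci/lint_test in the diff below gains a --verbose flag (wired up through the new "run: ./ci/lint_test -v" in .launchpad.yaml) and changes run_lint_command to return per-file results instead of one aggregated output string and a summed exit code. A minimal sketch of that new contract, using the names from the diff; the module-level driver code around it is illustrative only, not part of the proposal:

#!/usr/bin/python3
# Illustrative sketch only -- mirrors the run_lint_command() contract that the
# ci/lint_test changes below introduce. The real script additionally verifies
# the linter exists (via `which`) and skips paths containing ".git".
import subprocess
import sys


def run_lint_command(files_to_lint, lint_command, arguments=None):
    '''Run one linter invocation per file; collect per-file output/exit codes.'''
    lint_output = []
    exit_codes = []
    lint_success = True
    for path in files_to_lint:
        cmd = [lint_command] + (arguments.split(" ") if arguments else []) + [path]
        result = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
        lint_output.append(result.stdout.decode("utf-8"))
        exit_codes.append(result.returncode)
        if result.returncode != 0:
            lint_success = False
    return lint_output, exit_codes, lint_success


# With -v (the flag .launchpad.yaml now passes), the captured output of every
# failing file is printed, rather than just the name of the failing linter:
outputs, codes, ok = run_lint_command(["ci/lint_test"], "pylint", "--disable=E0012")
if not ok:
    for out, code in zip(outputs, codes):
        if code != 0:
            print(out)
    sys.exit(1)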
diff --git a/.launchpad.yaml b/.launchpad.yaml
index 6dca924..4f6669e 100755
--- a/.launchpad.yaml
+++ b/.launchpad.yaml
@@ -6,4 +6,4 @@ jobs:
series: focal
architectures: amd64
packages: [pylint, python3, shellcheck, yamllint]
-run: ./ci/lint_test
+run: ./ci/lint_test -v
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-test-instances b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/tools/create-test-instances
old mode 100755
new mode 100644
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/worker-config-production/setup-canonical.sh b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/worker-config-production/setup-canonical.sh
index 93d48d8..2e4cd32 100644
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/worker-config-production/setup-canonical.sh
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/worker-config-production/setup-canonical.sh
@@ -1,4 +1,6 @@
+#!/bin/sh
# Canonical/Ubuntu specific testbed setup
+# shellcheck disable=SC2230
# Remove trailing dot from the machine fqdn.
# Workaround for LP: #2019472.
@@ -70,6 +72,7 @@ if type iptables >/dev/null 2>&1; then
iptables -w -t mangle -A FORWARD -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu || true
EOF
chmod 755 /etc/rc.local
+ # shellcheck disable=SC1091
. /etc/rc.local
fi
diff --git a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/worker/worker b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/worker/worker
index 4ca5f88..5510c4e 100755
--- a/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/worker/worker
+++ b/charms/focal/autopkgtest-cloud-worker/autopkgtest-cloud/worker/worker
@@ -1,9 +1,11 @@
#!/usr/bin/python3
-# autopkgtest cloud worker
-# Author: Martin Pitt <martin.pitt@xxxxxxxxxx>
-#
-# Requirements: python3-amqplib python3-swiftclient python3-influxdb
-# Requirements for running autopkgtest from git: python3-debian libdpkg-perl
+#pylint: disable=invalid-name, fixme, consider-using-f-string, too-many-lines, import-error, too-many-arguments, consider-using-with, protected-access, logging-not-lazy, format-string-without-interpolation, line-too-long, anomalous-backslash-in-string, missing-function-docstring, global-statement, unused-argument, redefined-builtin, used-before-assignment, too-many-locals, redefined-outer-name, unused-variable, bad-except-order, too-many-nested-blocks, too-many-return-statements, too-many-branches, too-many-statements, bad-option-value
+'''
+autopkgtest cloud worker
+Author: Martin Pitt <martin.pitt@xxxxxxxxxx>
+Requirements: python3-amqplib python3-swiftclient python3-influxdb
+Requirements for running autopkgtest from git: python3-debian libdpkg-perl
+'''
import os
import sys
@@ -22,6 +24,7 @@ import hashlib
import random
import fnmatch
import socket
+from urllib.error import HTTPError
import amqplib.client_0_8 as amqp
import distro_info
@@ -30,7 +33,6 @@ import systemd.journal
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
-from urllib.error import HTTPError
ALL_RELEASES = distro_info.UbuntuDistroInfo().get_all(result='object')
@@ -89,7 +91,8 @@ TEMPORARY_TEST_FAIL_STRINGS = ['Could not connect to ftpmaster.internal:80',
'Cannot initiate the connection to ppa.launchpad.net:80',
'Failed to fetch http://ftpmaster.internal/',
'" failed with stderr "error: Get https://0.0.0.0/1.0/operations/',
- 'RecursionError: maximum recursion depth exceeded in comparison', # #1908506
+ 'RecursionError: maximum recursion ' + \
+ 'depth exceeded in comparison', # #1908506
'Temporary failure resolving \'archive.ubuntu.com\'',
'Temporary failure resolving \'ports.ubuntu.com\'',
'Temporary failure resolving \'ftpmaster.internal\'',
@@ -112,8 +115,10 @@ FAIL_PKG_STRINGS = {'systemd*': ['timed out waiting for testbed to reboot',
'Timed out on waiting for ssh connection',
'Temporary failure resolving',
'VirtSubproc.Timeout',
- 'ERROR: testbed failure: testbed auxverb failed with exit code 255',
- 'ERROR: testbed failure: rules extract failed with exit code 100 (apt failure)'],
+ 'ERROR: testbed failure: testbed auxverb ' + \
+ 'failed with exit code 255',
+ 'ERROR: testbed failure: rules extract failed ' + \
+ 'with exit code 100 (apt failure)'],
'linux-*': ['timed out waiting for testbed to reboot',
'Timed out on waiting for ssh connection',
'ERROR: testbed failure: testbed auxverb failed',
@@ -133,7 +138,8 @@ FAIL_PKG_STRINGS = {'systemd*': ['timed out waiting for testbed to reboot',
'Timed out on waiting for ssh connection'],
'kdump-tools': ['This does not look like a tar archive',
'Timed out on waiting for ssh connection'],
- 'llvm-toolchain-*': ['clang: error: unable to execute command: Segmentation fault (core dumped)']}
+ 'llvm-toolchain-*': ['clang: error: unable to execute command: ' + \
+ 'Segmentation fault (core dumped)']}
# Exemptions from TEMPORARY_TEST_FAIL_STRINGS / FAIL_{PKG_,}STRINGS
# Adding dbconfig-common here is a hack of sorts LP: #2001714
@@ -220,10 +226,10 @@ def parse_args():
return parser.parse_args()
-def read_per_package_configs(cfg):
+def read_per_package_configs(pkg_cfg):
def read_per_package_file(filename):
out = set()
- with open(filename, 'r') as f:
+ with open(filename, 'r', encoding='utf-8') as f:
entries = {
line.strip()
for line in f.readlines()
@@ -245,7 +251,7 @@ def read_per_package_configs(cfg):
return out
global big_packages, long_tests, never_run
- dir = cfg.get('autopkgtest', 'per_package_config_dir').strip()
+ dir = pkg_cfg.get('autopkgtest', 'per_package_config_dir').strip()
big_packages = read_per_package_file(os.path.join(dir, "big_packages"))
long_tests = read_per_package_file(os.path.join(dir, "long_tests"))
@@ -269,8 +275,10 @@ def process_output_dir(dir, pkgname, code, triggers):
# the version, so that frontends (e.g. autopkgtest-web, or britney) can
# display the result.
if code in FAIL_CODES and 'testpkg-version' not in files:
- logging.warning('Code %d returned and no testpkg-version - returning "unknown" for %s' % (code, pkgname))
- with open(os.path.join(dir, 'testpkg-version'), 'w') as testpkg_version:
+ logging.warning('Code %d returned and no testpkg-version - returning "unknown" for %s',
+ code,
+ pkgname)
+ with open(os.path.join(dir, 'testpkg-version'), 'w', encoding='utf-8') as testpkg_version:
testpkg_version.write('%s unknown' % pkgname)
files.add('testpkg-version')
# we might need to fake testinfo.json up too, depending on how
@@ -279,23 +287,23 @@ def process_output_dir(dir, pkgname, code, triggers):
if 'testinfo.json' not in files and triggers:
logging.warning('...testinfo.json is missing too, faking one up')
triggers = ' '.join(triggers)
- with open(os.path.join(dir, 'testinfo.json'), 'w') as testinfo:
+ with open(os.path.join(dir, 'testinfo.json'), 'w', encoding='utf-8') as testinfo:
d = {'custom_environment':
['ADT_TEST_TRIGGERS=%s' % triggers]}
json.dump(d, testinfo, indent=True)
files.add('testinfo.json')
- with open(os.path.join(dir, 'testpkg-version'), 'r') as tpv:
+ with open(os.path.join(dir, 'testpkg-version'), 'r', encoding='utf-8') as tpv:
testpkg_version = tpv.read().split()[1]
try:
- with open(os.path.join(dir, 'duration'), 'r') as dur:
+ with open(os.path.join(dir, 'duration'), 'r', encoding='utf-8') as dur:
duration = dur.read()
except FileNotFoundError:
duration = None
try:
- with open(os.path.join(dir, 'requester'), 'r') as req:
+ with open(os.path.join(dir, 'requester'), 'r', encoding='utf-8') as req:
requester = req.read()
except FileNotFoundError:
requester = None
@@ -303,7 +311,9 @@ def process_output_dir(dir, pkgname, code, triggers):
# these are small and we need only these for gating and indexing
resultfiles = ['exitcode']
# these might not be present in infrastructure failure cases
- for f in ['testbed-packages', 'testpkg-version', 'duration', 'testinfo.json', 'requester', 'summary']:
+ for f in ['testbed-packages',
+ 'testpkg-version',
+ 'duration', 'testinfo.json', 'requester', 'summary']:
if f in files:
resultfiles.append(f)
subprocess.check_call(['tar', 'cf', 'result.tar'] + resultfiles, cwd=dir)
@@ -352,7 +362,7 @@ def host_arch(release, architecture):
def subst(s, big_package, release, architecture, hostarch, pkgname):
- subst = {
+ substitute = {
'RELEASE': release,
'ARCHITECTURE': architecture,
'HOSTARCH': hostarch,
@@ -364,14 +374,15 @@ def subst(s, big_package, release, architecture, hostarch, pkgname):
}
for i in args.variable:
k, v = i.split('=', 1)
- subst[k] = v
+ substitute[k] = v
- for k, v in subst.items():
+ for k, v in substitute.items():
s = s.replace('$' + k, v)
return s
-def send_status_info(queue, release, architecture, pkgname, params, out_dir, running, duration, private=False):
+def send_status_info(queue, release, architecture, pkgname, params, out_dir, running, duration,
+ private=False):
'''Send status and logtail to status queue'''
if not queue:
@@ -406,18 +417,18 @@ def send_status_info(queue, release, architecture, pkgname, params, out_dir, run
'logtail': logtail})
queue.basic_publish(amqp.Message(msg, delivery_mode=2), status_exchange_name, '')
-def call_autopkgtest(argv, release, architecture, pkgname, params, out_dir, start_time, private=False):
+def call_autopkgtest(argv, release, architecture, pkgname,
+ params, out_dir, start_time, private=False):
'''Call autopkgtest and regularly send status/logtail to status_exchange_name
Return exit code.
'''
# set up status AMQP exchange
- global amqp_con
status_amqp = amqp_con.channel()
status_amqp.access_request('/data', active=True, read=False, write=True)
status_amqp.exchange_declare(status_exchange_name, 'fanout', durable=False, auto_delete=True)
- null_fd = open('/dev/null', 'w')
+ null_fd = open('/dev/null', 'w', encoding='utf-8')
autopkgtest = subprocess.Popen(argv, stdout=null_fd, stderr=subprocess.STDOUT)
# FIXME: Use autopkgtest.wait(timeout=10) once moving to Python 3
# only send status update every 10s, but check if program has finished every 1s
@@ -535,14 +546,16 @@ def request(msg):
os.makedirs(out_dir)
# now let's fake up a log file
- with open(os.path.join(out_dir, 'log'), 'w') as log:
- log.write('This package is marked to never run. To get the entry removed, contact a member of the Ubuntu Release or Canonical Ubuntu QA team.')
+ with open(os.path.join(out_dir, 'log'), 'w', encoding='utf-8') as log:
+ log.write('This package is marked to never run. To get the entry ' + \
+ 'removed, contact a member of the release team.')
triggers = None
# a json file containing the env
if 'triggers' in params:
triggers = ' '.join(params['triggers'])
- with open(os.path.join(out_dir, 'testinfo.json'), 'w') as testinfo:
+ with open(os.path.join(out_dir, 'testinfo.json'), 'w',
+ encoding='utf-8') as testinfo:
d = {'custom_environment':
['ADT_TEST_TRIGGERS=%s' % triggers]}
json.dump(d, testinfo, indent=True)
@@ -550,7 +568,8 @@ def request(msg):
# and the testpackage version (pkgname blacklisted)
# XXX: replace "blacklisted" here, but needs changes in
# proposed-migration and hints
- with open(os.path.join(out_dir, 'testpkg-version'), 'w') as testpkg_version:
+ with open(os.path.join(out_dir, 'testpkg-version'), 'w',
+ encoding='utf-8') as testpkg_version:
testpkg_version.write('%s blacklisted' % pkgname)
container = 'autopkgtest-' + release
@@ -584,7 +603,9 @@ def request(msg):
if 'triggers' in params and 'qemu-efi-noacpi/0' in params['triggers']:
if architecture == 'arm64':
- argv += ['--setup-commands', '/home/ubuntu/autopkgtest-cloud/worker-config-production/qemu-efi-noacpi.sh']
+ argv += ['--setup-commands',
+ '/home/ubuntu/autopkgtest-cloud/worker' + \
+ '-config-production/qemu-efi-noacpi.sh']
else:
# these will be written later on
code = 99
@@ -592,12 +613,13 @@ def request(msg):
os.makedirs(out_dir)
# fake a log file
- with open(os.path.join(out_dir, 'log'), 'w') as log:
+ with open(os.path.join(out_dir, 'log'), 'w', encoding='utf-8') as log:
log.write('Not running due to invalid trigger: qemu-efi-noacpi/0 is arm64 only')
dont_run = True
# and the testpackage version (invalid trigger with a reason)
- with open(os.path.join(out_dir, 'testpkg-version'), 'w') as testpkg_version:
+ with open(os.path.join(out_dir, 'testpkg-version'),
+ 'w', encoding='utf-8') as testpkg_version:
testpkg_version.write('invalid trigger: qemu-efi-noacpi/0 is arm64 only')
if 'ppas' in params and params['ppas']:
@@ -605,30 +627,39 @@ def request(msg):
try:
(ppacreds, _, ppaurl) = ppa.rpartition('@')
(ppaurl, _, fingerprint) = ppaurl.partition(':')
- (ppacreds_user, ppacreds_pass) = ppacreds.split(':') if ppacreds else (None, None)
+ (ppacreds_user, ppacreds_pass) = ppacreds.split(':') if ppacreds else (None,
+ None)
(ppauser, ppaname) = ppaurl.split('/')
except ValueError:
- logging.error('Invalid PPA specification, must be [user:token@]lpuser/ppa_name[:fingerprint]')
+ logging.error('Invalid PPA specification, must be ' + \
+ '[user:token@]lpuser/ppa_name[:fingerprint]')
msg.channel.basic_ack(msg.delivery_tag)
return
if fingerprint:
- logging.debug('Request states that PPA user %s, name %s has GPG fingerprint %s' % (ppauser, ppaname, fingerprint))
+ logging.debug('Request states that PPA user %s, name %s has GPG fingerprint %s',
+ ppauser, ppaname, fingerprint)
else:
# Private PPAs require the fingerprint passed through the
# request as we can't use the LP API to fetch it.
if ppacreds_user:
- logging.error('Invalid PPA specification, GPG fingerprint required for private PPAs')
+ logging.error('Invalid PPA specification, GPG fingerprint ' + \
+ 'required for private PPAs')
msg.channel.basic_ack(msg.delivery_tag)
return
for retry in range(5):
try:
- f = urllib.request.urlopen('https://api.launchpad.net/1.0/~%s/+archive/ubuntu/%s' % (ppauser, ppaname))
+ f = urllib.request.urlopen('https://api.launchpad.net/' + \
+ '1.0/~%s/+archive/ubuntu/%s' % \
+ (ppauser, ppaname))
contents = f.read().decode('UTF-8')
f.close()
fingerprint = json.loads(contents)['signing_key_fingerprint']
- logging.debug('PPA user %s, name %s has GPG fingerprint %s' % (ppauser, ppaname, fingerprint))
+ logging.debug('PPA user %s, name %s has GPG fingerprint %s' % \
+ (ppauser, ppaname, fingerprint))
except (IOError, ValueError, KeyError) as e:
- logging.error('Cannot get PPA information: "%s". Consuming the request - it will be left dangling; retry once the problem is resolved.' % e)
+ logging.error(('Cannot get PPA information: "%s". Consuming the ' + \
+ 'request - it will be left dangling; retry once ' + \
+ 'the problem is resolved.') % e)
msg.channel.basic_ack(msg.delivery_tag)
return
except HTTPError as e:
@@ -641,7 +672,9 @@ def request(msg):
else:
break
else:
- logging.error('Cannot contact Launchpad to get PPA information. Consuming the request - it will be left dangling; retry once the problem is resolved.')
+ logging.error('Cannot contact Launchpad to get PPA information. ' + \
+ 'Consuming the request - it will be left dangling; ' + \
+ 'retry once the problem is resolved.')
msg.channel.basic_ack(msg.delivery_tag)
return
if ppacreds_user:
@@ -651,7 +684,8 @@ def request(msg):
else:
ppaprefix = 'http://'
# add GPG key
- argv += ['--setup-commands', 'apt-key adv --keyserver keyserver.ubuntu.com --recv-key ' + fingerprint]
+ argv += ['--setup-commands',
+ 'apt-key adv --keyserver keyserver.ubuntu.com --recv-key ' + fingerprint]
# add apt source
argv += ['--setup-commands', 'REL=$(sed -rn "/^(deb|deb-src) .*(ubuntu.com|ftpmaster)/ { s/^[^ ]+ +(\[.*\] *)?[^ ]* +([^ -]+) +.*$/\\2/p; q }" /etc/apt/sources.list); '
'echo "deb %(prefix)sppa.launchpad.net/%(u)s/%(p)s/ubuntu $REL main" > /etc/apt/sources.list.d/autopkgtest-%(u)s-%(p)s.list; '
@@ -754,14 +788,14 @@ def request(msg):
argv += ['--setup-commands', 'apt-get install -y linux-image-omap linux-headers-omap']
else:
argv += ['--setup-commands',
- ('apt-get install -y ^kernel-testing--%(t)s--full--preferred$ || ' +
- 'apt-get install -y ^linux-image%(f)s$ ^linux-headers%(f)s$ || ' +
- 'apt-get install -y ^linux-image-generic%(f)s$ ^linux-headers-generic%(f)s$') %
- {'f': flavor, 't': totest}]
+ ('apt-get install -y ^kernel-testing--%(t)s--full--preferred$ || ' +
+ 'apt-get install -y ^linux-image%(f)s$ ^linux-headers%(f)s$ || ' +
+ 'apt-get install -y ^linux-image-generic%(f)s$ ^linux-headers-generic%(f)s$') %
+ {'f': flavor, 't': totest}]
argv += ['--setup-commands',
- ('apt-get install -y ^kernel-testing--%(t)s--modules-extra--preferred$ || ' +
- 'apt-get install -y ^linux-modules-extra%(f)s$ || :') %
- {'f': flavor, 't': totest}]
+ ('apt-get install -y ^kernel-testing--%(t)s--modules-extra--preferred$ || ' +
+ 'apt-get install -y ^linux-modules-extra%(f)s$ || :') %
+ {'f': flavor, 't': totest}]
break
if 'testname' in params:
@@ -820,7 +854,7 @@ def request(msg):
if s in contents]
if temp_fails:
logging.warning('Saw %s in log, which is a sign of a temporary failure.',
- ' and '.join(temp_fails))
+ ' and '.join(temp_fails))
logging.warning('%sLog follows:', retrying)
logging.error(contents)
if retry < 2:
@@ -852,7 +886,7 @@ def request(msg):
if fails:
num_failures += 1
logging.warning('Saw %s in log, which is a sign of a real (not tmp) failure - seen %d so far',
- ' and '.join(fails), num_failures)
+ ' and '.join(fails), num_failures)
logging.warning('Testbed failure. %sLog follows:', retrying)
logging.error(contents)
if retry < 2:
@@ -878,17 +912,17 @@ def request(msg):
if code == 1:
logging.error('autopkgtest exited with unexpected error code 1')
sys.exit(1)
- with open(os.path.join(out_dir, 'exitcode'), 'w') as f:
+ with open(os.path.join(out_dir, 'exitcode'), 'w', encoding='utf-8') as f:
f.write('%i\n' % code)
- with open(os.path.join(out_dir, 'duration'), 'w') as f:
+ with open(os.path.join(out_dir, 'duration'), 'w', encoding='utf-8') as f:
f.write('%u\n' % duration)
if 'requester' in params:
- with open(os.path.join(out_dir, 'requester'), 'w') as f:
+ with open(os.path.join(out_dir, 'requester'), 'w', encoding='utf-8') as f:
f.write('%s\n' % params['requester'])
if 'readable-by' in params:
- with open(os.path.join(out_dir, 'readable-by'), 'w') as f:
+ with open(os.path.join(out_dir, 'readable-by'), 'w', encoding='utf-8') as f:
if isinstance(params['readable-by'], list):
f.write('\n'.join(params['readable-by']))
else:
@@ -978,20 +1012,19 @@ def request(msg):
finally:
shutil.rmtree(work_dir)
- global amqp_con
complete_amqp = amqp_con.channel()
complete_amqp.access_request('/complete', active=True, read=False, write=True)
complete_amqp.exchange_declare(complete_exchange_name, 'fanout', durable=True, auto_delete=False)
- complete_msg = json.dumps ({'architecture': architecture,
- 'container': container,
- 'duration': duration,
- 'exitcode': code,
- 'package': pkgname,
- 'testpkg_version': testpkg_version,
- 'release': release,
- 'requester': requester,
- 'swift_dir': swift_dir,
- 'triggers': triggers})
+ complete_msg = json.dumps({'architecture': architecture,
+ 'container': container,
+ 'duration': duration,
+ 'exitcode': code,
+ 'package': pkgname,
+ 'testpkg_version': testpkg_version,
+ 'release': release,
+ 'requester': requester,
+ 'swift_dir': swift_dir,
+ 'triggers': triggers})
complete_amqp.basic_publish(amqp.Message(complete_msg, delivery_mode=2),
complete_exchange_name, '')
diff --git a/charms/focal/autopkgtest-cloud-worker/lib/systemd.py b/charms/focal/autopkgtest-cloud-worker/lib/systemd.py
index b83828f..bbb0ca3 100644
--- a/charms/focal/autopkgtest-cloud-worker/lib/systemd.py
+++ b/charms/focal/autopkgtest-cloud-worker/lib/systemd.py
@@ -1,4 +1,5 @@
-#pylint: disable=missing-function-docstring
+"""Systemd handler for autopkgtest-cloud workers"""
+#pylint: disable=missing-function-docstring, fixme, import-error, consider-using-f-string, invalid-name, too-many-locals, too-many-branches, bad-option-value
import os
import shutil
from textwrap import dedent
@@ -36,7 +37,7 @@ def reload():
) # cancellable
-def enabledisable(unit_names, enabledisable, enabledisablearg, startstop):
+def enabledisable(unit_names, enabledisableflag, enabledisablearg, startstop):
print(
"calling {enabledisable} then {startstop} on {unit_names}".format(
**locals()
@@ -46,7 +47,7 @@ def enabledisable(unit_names, enabledisable, enabledisablearg, startstop):
"org.freedesktop.systemd1",
"/org/freedesktop/systemd1",
"org.freedesktop.systemd1.Manager",
- enabledisable,
+ enabledisableflag,
enabledisablearg,
GLib.VariantType(
"*"
@@ -131,7 +132,7 @@ def get_units():
lxd_object_paths = defaultdict(lambda: defaultdict(dict))
for unit in units:
- (name, _, _, active, _, _, object_path, _, _, _) = unit
+ (name, _, _, _, _, _, object_path, _, _, _) = unit
if name.startswith("build-adt-image@") and name.endswith(".timer"):
name_release_region_arch = name[16:][:-6]
(release, region, arch) = name_release_region_arch.split("-", -1)
@@ -171,8 +172,7 @@ def update_cloud_dropins(region, arch, n, releases):
def get_arches(release):
if arch == "amd64" and UbuntuRelease(release) < UbuntuRelease("focal"):
return ["amd64", "i386"]
- else:
- return [arch]
+ return [arch]
ensure_adt_units = " ".join(
[
@@ -192,7 +192,7 @@ def update_cloud_dropins(region, arch, n, releases):
shutil.rmtree(dropindir)
os.makedirs(dropindir)
- with open(os.path.join(dropindir, "ensure-adt-image.conf"), "w") as f:
+ with open(os.path.join(dropindir, "ensure-adt-image.conf"), "w", encoding='utf-8') as f:
f.write(
dedent(
"""\
@@ -222,7 +222,7 @@ def update_lxd_dropins(arch, ip, n):
pass
with open(
- os.path.join(dropindir, "autopkgtest-lxd-remote.conf"), "w"
+ os.path.join(dropindir, "autopkgtest-lxd-remote.conf"), "w", encoding='utf-8'
) as f:
remote_unit = "autopkgtest-lxd-remote@lxd-{}-{}.service".format(
arch, ip
@@ -334,7 +334,8 @@ def set_up_systemd_units(target_cloud_config, target_lxd_config, releases):
if releases_to_disable:
print(
- "Disabling build-adt-image timers for {region}/{arch}/{releases_to_disable}".format(
+ "Disabling build-adt-image timers for " + \
+ "{region}/{arch}/{releases_to_disable}".format(
**locals()
)
)
@@ -354,9 +355,7 @@ def set_up_systemd_units(target_cloud_config, target_lxd_config, releases):
target_n_units = target_lxd_config.get(arch, {}).get(ip, 0)
if target_n_units > 0:
update_lxd_dropins(arch, ip, target_n_units)
- if n_units == target_n_units:
- continue
- elif n_units < target_n_units:
+ if n_units < target_n_units:
# need to enable some units
delta = target_n_units - n_units
unit_names = [
@@ -364,7 +363,7 @@ def set_up_systemd_units(target_cloud_config, target_lxd_config, releases):
for n in range(n_units + 1, n_units + delta + 1)
]
enable(unit_names)
- else:
+ elif n_units > target_n_units:
# need to disable some units
delta = n_units - target_n_units
unit_names = [
diff --git a/charms/focal/autopkgtest-cloud-worker/lib/utils.py b/charms/focal/autopkgtest-cloud-worker/lib/utils.py
index c7fce49..eebe6a1 100644
--- a/charms/focal/autopkgtest-cloud-worker/lib/utils.py
+++ b/charms/focal/autopkgtest-cloud-worker/lib/utils.py
@@ -1,4 +1,4 @@
-#pylint: disable=missing-module-docstring, missing-class-docstring, missing-function-docstring
+#pylint: disable=missing-module-docstring, missing-class-docstring, missing-function-docstring, import-error, consider-using-f-string, bad-option-value
import os
import pwd
import subprocess
diff --git a/charms/focal/autopkgtest-cloud-worker/reactive/autopkgtest_cloud_worker.py b/charms/focal/autopkgtest-cloud-worker/reactive/autopkgtest_cloud_worker.py
index 3dc4625..4921fd7 100644
--- a/charms/focal/autopkgtest-cloud-worker/reactive/autopkgtest_cloud_worker.py
+++ b/charms/focal/autopkgtest-cloud-worker/reactive/autopkgtest_cloud_worker.py
@@ -1,4 +1,4 @@
-#pylint: disable=missing-module-docstring,missing-function-docstring
+#pylint: disable=missing-module-docstring,missing-function-docstring, consider-using-dict-items, import-error, wrong-import-order, invalid-name, fixme, consider-using-f-string, possibly-unused-variable, import-outside-toplevel, consider-using-with, redefined-outer-name, bad-option-value
from charms.layer import status
from charms.reactive import (
when,
@@ -11,7 +11,6 @@ from charms.reactive import (
hook,
not_unless,
)
-from charms.reactive.relations import endpoint_from_flag
from charmhelpers.core.hookenv import (
charm_dir,
config,
@@ -20,9 +19,7 @@ from charmhelpers.core.hookenv import (
storage_list,
)
from utils import install_autodep8, UnixUser
-
from textwrap import dedent
-
import glob
import os
import pygit2
@@ -126,21 +123,20 @@ def clone_autopkgtest():
"autopkgtest.influx-creds-written",
)
def set_up_systemd_units():
+ def link_and_enable(unit, dest, base):
+ os.symlink(unit, dest)
+ if "@" not in base:
+ subprocess.check_call(["systemctl", "enable", base])
for unit in glob.glob(os.path.join(charm_dir(), "units", "*")):
base = os.path.basename(unit)
dest = os.path.join(os.path.sep, "etc", "systemd", "system", base)
- def link_and_enable():
- os.symlink(unit, dest)
- if "@" not in base:
- subprocess.check_call(["systemctl", "enable", base])
-
try:
- link_and_enable()
+ link_and_enable(unit, dest, base)
except FileExistsError:
if not os.path.islink(dest):
os.unlink(dest)
- link_and_enable()
+ link_and_enable(unit, dest, base)
set_flag("autopkgtest.systemd_units_linked_and_enabled")
@@ -212,7 +208,7 @@ def set_up_rabbitmq(rabbitmq):
host = rabbitmq.private_address()
status.maintenance("Configuring rabbitmq")
log("Setting up rabbitmq connection to: {}@{}".format(username, host))
- with open(RABBITMQ_CRED_PATH, "w") as cred_file:
+ with open(RABBITMQ_CRED_PATH, "w", encoding='utf-8') as cred_file:
cred_file.write(
dedent(
"""\
@@ -398,7 +394,7 @@ def write_v2_config():
def write_swift_config():
with open(
- os.path.expanduser("~ubuntu/swift-password.cred"), "w"
+ os.path.expanduser("~ubuntu/swift-password.cred"), "w", encoding='utf-8'
) as swift_password_file:
for key in config():
if key.startswith("swift") and config()[key] is not None:
@@ -471,17 +467,18 @@ def write_worker_config():
}
def write(conf_file):
- with open(conf_file, "w") as cf:
+ with open(conf_file, "w", encoding='utf-8') as cf:
cp = configparser.ConfigParser()
cp.read_dict(conf)
cp.write(cf)
# FIXME: Hotfix for bos01
if "bos01" in conf_file:
- with open(conf_file, "r") as cf:
+ with open(conf_file, "r", encoding='utf-8') as cf:
conf_data = cf.read()
- with open(conf_file, "w") as cf:
- cf.write(conf_data.replace(config().get("mirror"), "http://us.ports.ubuntu.com/ubuntu-ports/"))
+ with open(conf_file, "w", encoding='utf-8') as cf:
+ cf.write(conf_data.replace(config().get("mirror"),
+ "http://us.ports.ubuntu.com/ubuntu-ports/"))
for region in nworkers_yaml:
@@ -494,13 +491,12 @@ def write_worker_config():
conf["autopkgtest"]["architectures"] = "amd64 i386"
write(conf_file)
break
- else:
- conf_file = os.path.join(
- os.path.expanduser("~ubuntu"),
- "worker-{}-{}.conf".format(region, arch),
- )
- conf["autopkgtest"]["architectures"] = arch
- write(conf_file)
+ conf_file = os.path.join(
+ os.path.expanduser("~ubuntu"),
+ "worker-{}-{}.conf".format(region, arch),
+ )
+ conf["autopkgtest"]["architectures"] = arch
+ write(conf_file)
for arch in lxdremotes_yaml:
conf_file = os.path.join(
@@ -516,7 +512,7 @@ def write_worker_config():
@when("config.changed.net-name")
def write_net_name():
clear_flag("autopkgtest.net-name-written")
- with open(os.path.expanduser("~ubuntu/net-name.rc"), "w") as f:
+ with open(os.path.expanduser("~ubuntu/net-name.rc"), "w", encoding='utf-8') as f:
f.write('NET_NAME="{}"\n'.format(config().get("net-name")))
set_flag("autopkgtest.net-name-written")
set_flag("autopkgtest.reload-needed")
@@ -524,7 +520,7 @@ def write_net_name():
@when("config.changed.mirror")
def write_mirror():
- with open(os.path.expanduser("~ubuntu/mirror.rc"), "w") as f:
+ with open(os.path.expanduser("~ubuntu/mirror.rc"), "w", encoding='utf-8') as f:
f.write('MIRROR="{}"\n'.format(config().get("mirror")))
set_flag("autopkgtest.reload-needed")
@@ -575,7 +571,7 @@ def write_influx_creds():
influxdb_database = config().get("influxdb-database")
influxdb_context = config().get("influxdb-context")
- with open(os.path.expanduser("~ubuntu/influx.cred"), "w") as cf:
+ with open(os.path.expanduser("~ubuntu/influx.cred"), "w", encoding='utf-8') as cf:
cf.write(
dedent(
f"""\
diff --git a/charms/focal/autopkgtest-cloud-worker/tests/10-deploy b/charms/focal/autopkgtest-cloud-worker/tests/10-deploy
index 2cd32f6..3400ff3 100755
--- a/charms/focal/autopkgtest-cloud-worker/tests/10-deploy
+++ b/charms/focal/autopkgtest-cloud-worker/tests/10-deploy
@@ -1,25 +1,38 @@
#!/usr/bin/python3
+# pylint: disable=import-error, invalid-name
+'''
+Unit test for deploying juju charm
+'''
-import amulet
-import requests
import unittest
+import requests
+import amulet
class TestCharm(unittest.TestCase):
+ '''
+ Tests juju charm
+ '''
def setUp(self):
- self.d = amulet.Deployment()
+ '''
+ Sets up service for juju charm
+ '''
+ self.deployment = amulet.Deployment()
- self.d.add('autopkgtest-cloud-worker')
- self.d.expose('autopkgtest-cloud-worker')
+ self.deployment.add('autopkgtest-cloud-worker')
+ self.deployment.expose('autopkgtest-cloud-worker')
- self.d.setup(timeout=900)
- self.d.sentry.wait()
+ self.deployment.setup(timeout=900)
+ self.deployment.sentry.wait()
- self.unit = self.d.sentry['autopkgtest-cloud-worker'][0]
+ self.unit = self.deployment.sentry['autopkgtest-cloud-worker'][0]
def test_service(self):
+ '''
+ Tests connectivity to juju charm via http
+ '''
# test we can access over http
- page = requests.get('http://{}'.format(self.unit.info['public-address']))
+ page = requests.get(f'http://{self.unit.info["public-address"]}')
self.assertEqual(page.status_code, 200)
# Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
# more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
@@ -30,6 +43,6 @@ class TestCharm(unittest.TestCase):
# - .directory_contents(PATH) - List files and folders in PATH on that unit
# - .relation(relation, service:rel) - Get relation data from return service
-
+
if __name__ == '__main__':
unittest.main()
diff --git a/ci/lint_test b/ci/lint_test
index e52edc4..06a4133 100755
--- a/ci/lint_test
+++ b/ci/lint_test
@@ -1,5 +1,5 @@
#!/usr/bin/python3
-# pylint: disable = invalid-name, broad-except, subprocess-run-check
+# pylint: disable = broad-except, unnecessary-dict-index-lookup, bad-option-value
'''
Script to lint the scripts in the autopkgtest-cloud repository in CI
'''
@@ -8,33 +8,33 @@ import os
import sys
import logging
import subprocess
+import argparse
-def check_for_extension(input_list, output_list, extension):
+def check_for_extension(input_list, output_list, file_extension):
'''
- Checks filepaths in a list for a given extension
+ Checks filepaths in a list for a given file_extension
'''
- for a in input_list:
- if os.path.isfile(a):
- # if str(a)[-3:] == extension:
- if extension in str(a)[-6:]:
- output_list.append(str(a))
+ for filepath in input_list:
+ if os.path.isfile(filepath):
+ if file_extension in str(filepath)[-6:]:
+ output_list.append(str(filepath))
return output_list
-def check_for_shebang(input_list, output_list, shebang):
+def check_for_shebang(input_list, output_list, shebang_for_check):
'''
- Checks filepaths in a given list for a given shebang
+ Checks filepaths in a given list for a given shebang_for_check
'''
- for b in input_list:
- if os.path.isfile(b):
+ for filepath in input_list:
+ if os.path.isfile(filepath):
try:
- with open(b, 'r', encoding='utf-8') as myfile:
+ with open(filepath, 'r', encoding='utf-8') as myfile:
file = myfile.read()
into_list = file.splitlines()
if len(into_list) > 1:
- if into_list[0] == shebang:
- output_list.append(str(b))
+ if into_list[0] == shebang_for_check:
+ output_list.append(str(filepath))
except Exception as _:
pass
return output_list
@@ -44,42 +44,56 @@ def remove_list_from_list(input_list, remove_list):
'''
Removes elements from remove_list from input_list
'''
- for ff in input_list:
- if os.path.isfile(ff):
- if str(ff) in remove_list:
- input_list.remove(ff)
+ for list_elem in input_list:
+ if os.path.isfile(list_elem):
+ if str(list_elem) in remove_list:
+ input_list.remove(list_elem)
return input_list
def run_lint_command(files_to_lint, lint_command, arguments=None):
'''
- Runs a given lint command over a list of filepaths and stores output
+ Runs given lint commands over a list of filepaths and stores output
and exit code
'''
- exit_codes = 0
- lint_output = ""
- # check lint command exists
- for f in files_to_lint:
- if arguments is None:
- cmd = [lint_command, f]
- result = subprocess.run(cmd, stdout=subprocess.PIPE)
- else:
- cmd = [lint_command]
- for arg in arguments.split(" "):
- cmd.append(arg)
- cmd.append(f)
- result = subprocess.run(cmd, stdout=subprocess.PIPE)
- lint_output += result.stdout.decode("utf-8") + "\n"
- exit_codes += result.returncode
- return lint_output, exit_codes
+ exit_codes = []
+ lint_output = []
+ lint_success = True
+ check_for_cmd = subprocess.run(["which", lint_command], stdout=subprocess.PIPE, check=False)
+ if check_for_cmd.returncode != 0:
+ logger.error("%s not present on system - please amend before using this script.",
+ lint_command)
+ sys.exit(1)
+ for file in files_to_lint:
+ if ".git" not in file:
+ if arguments is None:
+ cmd = [lint_command, file]
+ result = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
+ else:
+ cmd = [lint_command]
+ for arg in arguments.split(" "):
+ cmd.append(arg)
+ cmd.append(file)
+ result = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
+ lint_output.append(result.stdout.decode("utf-8") + "\n")
+ exit_codes.append(result.returncode)
+ if result.returncode != 0:
+ lint_success = False
+ return lint_output, exit_codes, lint_success
-if __name__=="__main__":
+if __name__ == "__main__":
+ # pylint: disable=invalid-name
+ parser = argparse.ArgumentParser(description="Args for lint test")
+ parser.add_argument('-v',
+ '--verbose',
+ help="Verbose output from lint test (y/n)",
+ action='store_true')
+ args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('autopkgtest-cloud-linter')
- start_dir = "../"
- repo_dir = pathlib.Path(start_dir)
+ repo_dir = pathlib.Path("../")
repo_dir.rglob("*")
final_list_of_python_files = []
@@ -90,25 +104,28 @@ if __name__=="__main__":
"files": [],
"extensions": [".py"],
"shebangs": ["#!/usr/bin/python3"],
- "args": None,
- "output": "",
- "code": 0
+ "args": "--disable=E0012",
+ "output": [],
+ "code": [],
+ "success": False
},
"shellcheck": {
"files": [],
"extensions": [".sh", ".bash"],
"shebangs": ["#!/bin/bash", "#!/bin/sh"],
"args": None,
- "output": "",
- "code": 0
+ "output": [],
+ "code": [],
+ "success": False
},
'yamllint': {
"files": ["../"],
"extensions": None,
"shebangs": None,
"args": "--no-warnings",
- "output": "",
- "code": 0
+ "output": [],
+ "code": [],
+ "success": False
}
}
@@ -122,19 +139,23 @@ if __name__=="__main__":
data[key]["files"] = check_for_shebang(all_files, data[key]["files"], shebang)
all_files = remove_list_from_list(all_files, data[key]["files"])
data[key]["output"], \
- data[key]["code"] = run_lint_command(data[key]["files"], key, data[key]["args"])
- ecodesum = 0
- for _, oec in data.items():
- ecodesum += oec["code"]
- if ecodesum > 0:
+ data[key]["code"], \
+ data[key]["success"] = run_lint_command(data[key]["files"], key, data[key]["args"])
+
+ exit_code_sum = 0
+ for _, oec_s in data.items():
+ for e_c in oec_s["code"]:
+ exit_code_sum += e_c
+ if exit_code_sum > 0:
for key, item in data.items():
- if item["code"] > 0:
- # logger.info("%s output: \n%s", key, item["output"])
+ if not item["success"]:
logger.info("%s failed!", key)
- # sys.exit(1)
- # temporary exit code, will be set back to 1 when python and bash scripts have been linted
- # right now we are just checking yaml files
- if key == "yamllint" and item["code"] != 0:
- sys.exit(1)
- sys.exit(0)
- sys.exit(0)
\ No newline at end of file
+ if args.verbose:
+ for i in range(len(item["code"])):
+ if item["code"][i] != 0:
+ logger.info("%s", item["output"][i])
+ else:
+ logger.info("%s passed!", key)
+ sys.exit(1)
+ logger.info("All the following linting tests passed: %s\n", list(data.keys()))
+ sys.exit(0)
diff --git a/docs/conf.py b/docs/conf.py
index a46ffdc..b4b923c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,7 +1,7 @@
'''
Configuration file for the Sphinx documentation builder.
'''
-#pylint: disable=redefined-builtin
+#pylint: disable=redefined-builtin, invalid-name
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
diff --git a/lxc-slave-admin/cmd b/lxc-slave-admin/cmd
index ff991ff..0700964 100755
--- a/lxc-slave-admin/cmd
+++ b/lxc-slave-admin/cmd
@@ -1,6 +1,6 @@
#!/bin/sh
set -e
-MYDIR=`dirname $0`
+MYDIR=$(dirname "${0}")
if [ -z "$1" ]; then
echo "Usage: $0 <hosts> <commands or .commands file>" >&2
@@ -8,11 +8,11 @@ if [ -z "$1" ]; then
fi
if [ "$1" = "all" ]; then
- for f in $MYDIR/*.hosts; do
+ for f in "${MYDIR}"/*.hosts; do
hosts="$hosts -h $f";
done
else
- if [ -e ${1} ]; then
+ if [ -e "${1}" ]; then
hosts="-h ${1}"
elif [ -e "${1}.hosts" ]; then
hosts="-h ${1}.hosts"
@@ -29,8 +29,10 @@ if [ "${1%.commands}" != "$1" ]; then
exit 1
fi
# command file
- cat "$1" | parallel-ssh -x "-F $MYDIR/ssh_config" -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -OIdentitiesOnly=yes $hosts -p8 -t 0 -i -I
+ # shellcheck disable=SC2086
+ parallel-ssh -x "-F ${MYDIR}/ssh_config" -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -OIdentitiesOnly=yes $hosts -p8 -t 0 -i -I < "${1}"
else
# command
- parallel-ssh -x "-F $MYDIR/ssh_config" -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -OIdentitiesOnly=yes $hosts -p8 -t 0 -i -- "$@"
+ # shellcheck disable=SC2086
+ parallel-ssh -x "-F ${MYDIR}/ssh_config" -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -OIdentitiesOnly=yes $hosts -p8 -t 0 -i -- "$@"
fi
diff --git a/mojo/make-lxd-secgroup b/mojo/make-lxd-secgroup
index 634e598..1c1d961 100755
--- a/mojo/make-lxd-secgroup
+++ b/mojo/make-lxd-secgroup
@@ -1,5 +1,5 @@
#!/bin/sh
-
+# shellcheck disable=SC1090
set -eu
# there's apparently no way to get this dynamically
@@ -24,4 +24,4 @@ done
if [ -n "${ROUTER_IP:-}" ]; then
nova secgroup-add-rule lxd tcp 8443 8443 "${ROUTER_IP}/32" 2>/dev/null || true # perhaps it already existed
-fi
+fi
\ No newline at end of file
diff --git a/mojo/postdeploy b/mojo/postdeploy
index 0f857ae..4b0f88f 100755
--- a/mojo/postdeploy
+++ b/mojo/postdeploy
@@ -11,5 +11,6 @@ if [ "${MOJO_STAGE_NAME}" == "staging" ]; then
fi
echo "Setting up the floating IP address of the front end..."
-$(dirname $0)/add-floating-ip haproxy
-$(dirname $0)/add-floating-ip rabbitmq-server
+directory=$(dirname "$0")
+"${directory}"/add-floating-ip haproxy
+"${directory}"/add-floating-ip rabbitmq-server