nagios-charmers team mailing list archive
Message #01069
[Merge] ~addyess/charm-nagios:blacken into charm-nagios:master
Adam Dyess has proposed merging ~addyess/charm-nagios:blacken into charm-nagios:master.
Requested reviews:
Nagios Charm developers (nagios-charmers)
For more details, see:
https://code.launchpad.net/~addyess/charm-nagios/+git/charm-nagios/+merge/387617
--
Your team Nagios Charm developers is requested to review the proposed merge of ~addyess/charm-nagios:blacken into charm-nagios:master.
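
[Editorial note, not part of the proposal] The branch reformats the charm's Python sources with black (single quotes become double quotes, manually wrapped calls are re-joined) and renames the Makefile's unittest target to unittests. As an illustration only, and assuming black is installed in the lint environment, a reviewer could confirm the tree stays formatted with a small check script like the sketch below; the target directories are an assumption based on the files touched in this diff, and the branch itself does not add such a script.

#!/usr/bin/env python3
"""Sketch: verify the tree is still black-formatted (editorial illustration)."""
import subprocess
import sys

# Directories touched by this diff; adjust to match the repository layout.
TARGETS = ["bin", "hooks"]


def is_black_formatted(targets):
    """Return True if `black --check --diff` reports nothing to reformat."""
    result = subprocess.run(
        ["black", "--check", "--diff", *targets],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        # black prints the would-be changes on stdout when files need reformatting.
        print(result.stdout, end="")
    return result.returncode == 0


if __name__ == "__main__":
    sys.exit(0 if is_black_formatted(TARGETS) else 1)
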
diff --git a/Makefile b/Makefile
index 49a4a5c..e5b661e 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@ endif
default:
echo Nothing to do
-test: lint proof unittest functional
+test: lint proof unittests functional
@echo "Testing charm $(CHARM_NAME)"
lint:
@@ -34,7 +34,7 @@ functional: build
PYTEST_CLOUD_REGION=$(PYTEST_CLOUD_REGION) \
tox -e functional
-unittest:
+unittests:
@echo "Running unit tests"
@tox -e unit
diff --git a/bin/charm_helpers_sync.py b/bin/charm_helpers_sync.py
index 7c0c194..a689b84 100644
--- a/bin/charm_helpers_sync.py
+++ b/bin/charm_helpers_sync.py
@@ -29,36 +29,36 @@ from fnmatch import fnmatch
import six
-CHARM_HELPERS_REPO = 'https://github.com/juju/charm-helpers'
+CHARM_HELPERS_REPO = "https://github.com/juju/charm-helpers"
def parse_config(conf_file):
if not os.path.isfile(conf_file):
- logging.error('Invalid config file: %s.' % conf_file)
+ logging.error("Invalid config file: %s." % conf_file)
return False
return yaml.load(open(conf_file).read())
def clone_helpers(work_dir, repo):
- dest = os.path.join(work_dir, 'charm-helpers')
- logging.info('Cloning out %s to %s.' % (repo, dest))
+ dest = os.path.join(work_dir, "charm-helpers")
+ logging.info("Cloning out %s to %s." % (repo, dest))
branch = None
- if '@' in repo:
- repo, branch = repo.split('@', 1)
- cmd = ['git', 'clone', '--depth=1']
+ if "@" in repo:
+ repo, branch = repo.split("@", 1)
+ cmd = ["git", "clone", "--depth=1"]
if branch is not None:
- cmd += ['--branch', branch]
+ cmd += ["--branch", branch]
cmd += [repo, dest]
subprocess.check_call(cmd)
return dest
def _module_path(module):
- return os.path.join(*module.split('.'))
+ return os.path.join(*module.split("."))
def _src_path(src, module):
- return os.path.join(src, 'charmhelpers', _module_path(module))
+ return os.path.join(src, "charmhelpers", _module_path(module))
def _dest_path(dest, module):
@@ -66,73 +66,70 @@ def _dest_path(dest, module):
def _is_pyfile(path):
- return os.path.isfile(path + '.py')
+ return os.path.isfile(path + ".py")
def ensure_init(path):
- '''
+ """
ensure directories leading up to path are importable, omitting
parent directory, eg path='/hooks/helpers/foo'/:
hooks/
hooks/helpers/__init__.py
hooks/helpers/foo/__init__.py
- '''
- for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
- _i = os.path.join(d, '__init__.py')
+ """
+ for d, dirs, files in os.walk(os.path.join(*path.split("/")[:2])):
+ _i = os.path.join(d, "__init__.py")
if not os.path.exists(_i):
- logging.info('Adding missing __init__.py: %s' % _i)
- open(_i, 'wb').close()
+ logging.info("Adding missing __init__.py: %s" % _i)
+ open(_i, "wb").close()
def sync_pyfile(src, dest):
- src = src + '.py'
+ src = src + ".py"
src_dir = os.path.dirname(src)
- logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
+ logging.info("Syncing pyfile: %s -> %s." % (src, dest))
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copy(src, dest)
- if os.path.isfile(os.path.join(src_dir, '__init__.py')):
- shutil.copy(os.path.join(src_dir, '__init__.py'),
- dest)
+ if os.path.isfile(os.path.join(src_dir, "__init__.py")):
+ shutil.copy(os.path.join(src_dir, "__init__.py"), dest)
ensure_init(dest)
def get_filter(opts=None):
opts = opts or []
- if 'inc=*' in opts:
+ if "inc=*" in opts:
# do not filter any files, include everything
return None
def _filter(dir, ls):
- incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
+ incs = [opt.split("=").pop() for opt in opts if "inc=" in opt]
_filter = []
for f in ls:
_f = os.path.join(dir, f)
- if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
+ if not os.path.isdir(_f) and not _f.endswith(".py") and incs:
if True not in [fnmatch(_f, inc) for inc in incs]:
- logging.debug('Not syncing %s, does not match include '
- 'filters (%s)' % (_f, incs))
+ logging.debug("Not syncing %s, does not match include " "filters (%s)" % (_f, incs))
_filter.append(f)
else:
- logging.debug('Including file, which matches include '
- 'filters (%s): %s' % (incs, _f))
- elif (os.path.isfile(_f) and not _f.endswith('.py')):
- logging.debug('Not syncing file: %s' % f)
+ logging.debug("Including file, which matches include " "filters (%s): %s" % (incs, _f))
+ elif os.path.isfile(_f) and not _f.endswith(".py"):
+ logging.debug("Not syncing file: %s" % f)
_filter.append(f)
- elif (os.path.isdir(_f) and not
- os.path.isfile(os.path.join(_f, '__init__.py'))):
- logging.debug('Not syncing directory: %s' % f)
+ elif os.path.isdir(_f) and not os.path.isfile(os.path.join(_f, "__init__.py")):
+ logging.debug("Not syncing directory: %s" % f)
_filter.append(f)
return _filter
+
return _filter
def sync_directory(src, dest, opts=None):
if os.path.exists(dest):
- logging.debug('Removing existing directory: %s' % dest)
+ logging.debug("Removing existing directory: %s" % dest)
shutil.rmtree(dest)
- logging.info('Syncing directory: %s -> %s.' % (src, dest))
+ logging.info("Syncing directory: %s -> %s." % (src, dest))
shutil.copytree(src, dest, ignore=get_filter(opts))
ensure_init(dest)
@@ -141,47 +138,44 @@ def sync_directory(src, dest, opts=None):
def sync(src, dest, module, opts=None):
# Sync charmhelpers/__init__.py for bootstrap code.
- sync_pyfile(_src_path(src, '__init__'), dest)
+ sync_pyfile(_src_path(src, "__init__"), dest)
# Sync other __init__.py files in the path leading to module.
m = []
- steps = module.split('.')[:-1]
+ steps = module.split(".")[:-1]
while steps:
m.append(steps.pop(0))
- init = '.'.join(m + ['__init__'])
- sync_pyfile(_src_path(src, init),
- os.path.dirname(_dest_path(dest, init)))
+ init = ".".join(m + ["__init__"])
+ sync_pyfile(_src_path(src, init), os.path.dirname(_dest_path(dest, init)))
# Sync the module, or maybe a .py file.
if os.path.isdir(_src_path(src, module)):
sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
elif _is_pyfile(_src_path(src, module)):
- sync_pyfile(_src_path(src, module),
- os.path.dirname(_dest_path(dest, module)))
+ sync_pyfile(_src_path(src, module), os.path.dirname(_dest_path(dest, module)))
else:
- logging.warn('Could not sync: %s. Neither a pyfile or directory, '
- 'does it even exist?' % module)
+ logging.warn("Could not sync: %s. Neither a pyfile or directory, " "does it even exist?" % module)
def parse_sync_options(options):
if not options:
return []
- return options.split(',')
+ return options.split(",")
def extract_options(inc, global_options=None):
global_options = global_options or []
if global_options and isinstance(global_options, six.string_types):
global_options = [global_options]
- if '|' not in inc:
+ if "|" not in inc:
return (inc, global_options)
- inc, opts = inc.split('|')
+ inc, opts = inc.split("|")
return (inc, parse_sync_options(opts) + global_options)
def sync_helpers(include, src, dest, options=None):
if os.path.exists(dest):
- logging.debug('Removing existing directory: %s' % dest)
+ logging.debug("Removing existing directory: %s" % dest)
shutil.rmtree(dest)
if not os.path.isdir(dest):
os.makedirs(dest)
@@ -198,19 +192,19 @@ def sync_helpers(include, src, dest, options=None):
if isinstance(v, list):
for m in v:
inc, opts = extract_options(m, global_options)
- sync(src, dest, '%s.%s' % (k, inc), opts)
+ sync(src, dest, "%s.%s" % (k, inc), opts)
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = optparse.OptionParser()
- parser.add_option('-c', '--config', action='store', dest='config',
- default=None, help='helper config file')
- parser.add_option('-D', '--debug', action='store_true', dest='debug',
- default=False, help='debug')
- parser.add_option('-r', '--repository', action='store', dest='repo',
- help='charm-helpers git repository (overrides config)')
- parser.add_option('-d', '--destination', action='store', dest='dest_dir',
- help='sync destination dir (overrides config)')
+ parser.add_option("-c", "--config", action="store", dest="config", default=None, help="helper config file")
+ parser.add_option("-D", "--debug", action="store_true", dest="debug", default=False, help="debug")
+ parser.add_option(
+ "-r", "--repository", action="store", dest="repo", help="charm-helpers git repository (overrides config)"
+ )
+ parser.add_option(
+ "-d", "--destination", action="store", dest="dest_dir", help="sync destination dir (overrides config)"
+ )
(opts, args) = parser.parse_args()
if opts.debug:
@@ -219,43 +213,42 @@ if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
if opts.config:
- logging.info('Loading charm helper config from %s.' % opts.config)
+ logging.info("Loading charm helper config from %s." % opts.config)
config = parse_config(opts.config)
if not config:
- logging.error('Could not parse config from %s.' % opts.config)
+ logging.error("Could not parse config from %s." % opts.config)
sys.exit(1)
else:
config = {}
- if 'repo' not in config:
- config['repo'] = CHARM_HELPERS_REPO
+ if "repo" not in config:
+ config["repo"] = CHARM_HELPERS_REPO
if opts.repo:
- config['repo'] = opts.repo
+ config["repo"] = opts.repo
if opts.dest_dir:
- config['destination'] = opts.dest_dir
+ config["destination"] = opts.dest_dir
- if 'destination' not in config:
- logging.error('No destination dir. specified as option or config.')
+ if "destination" not in config:
+ logging.error("No destination dir. specified as option or config.")
sys.exit(1)
- if 'include' not in config:
+ if "include" not in config:
if not args:
- logging.error('No modules to sync specified as option or config.')
+ logging.error("No modules to sync specified as option or config.")
sys.exit(1)
- config['include'] = []
- [config['include'].append(a) for a in args]
+ config["include"] = []
+ [config["include"].append(a) for a in args]
sync_options = None
- if 'options' in config:
- sync_options = config['options']
+ if "options" in config:
+ sync_options = config["options"]
tmpd = tempfile.mkdtemp()
try:
- checkout = clone_helpers(tmpd, config['repo'])
- sync_helpers(config['include'], checkout, config['destination'],
- options=sync_options)
+ checkout = clone_helpers(tmpd, config["repo"])
+ sync_helpers(config["include"], checkout, config["destination"], options=sync_options)
except Exception as e:
logging.error("Could not sync: %s" % e)
raise e
finally:
- logging.debug('Cleaning up %s' % tmpd)
+ logging.debug("Cleaning up %s" % tmpd)
shutil.rmtree(tmpd)
diff --git a/hooks/common.py b/hooks/common.py
index ec34670..b3f403f 100644
--- a/hooks/common.py
+++ b/hooks/common.py
@@ -1,37 +1,42 @@
-import subprocess
-import socket
+"""Provide common utilities to many of the hooks in this charm."""
import os
-import os.path
import re
import shutil
+import socket
+import subprocess
import tempfile
from charmhelpers.core.hookenv import (
+ config,
log,
network_get,
network_get_primary_address,
unit_get,
- config,
)
from pynag import Model
-INPROGRESS_DIR = '/etc/nagios3-inprogress'
-INPROGRESS_CFG = '/etc/nagios3-inprogress/nagios.cfg'
-INPROGRESS_CONF_D = '/etc/nagios3-inprogress/conf.d'
-CHARM_CFG = '/etc/nagios3-inprogress/conf.d/charm.cfg'
-MAIN_NAGIOS_BAK = '/etc/nagios3.bak'
-MAIN_NAGIOS_DIR = '/etc/nagios3'
-MAIN_NAGIOS_CFG = '/etc/nagios3/nagios.cfg'
-PLUGIN_PATH = '/usr/lib/nagios/plugins'
+INPROGRESS_DIR = "/etc/nagios3-inprogress"
+INPROGRESS_CFG = "/etc/nagios3-inprogress/nagios.cfg"
+INPROGRESS_CONF_D = "/etc/nagios3-inprogress/conf.d"
+CHARM_CFG = "/etc/nagios3-inprogress/conf.d/charm.cfg"
+MAIN_NAGIOS_BAK = "/etc/nagios3.bak"
+MAIN_NAGIOS_DIR = "/etc/nagios3"
+MAIN_NAGIOS_CFG = "/etc/nagios3/nagios.cfg"
+PLUGIN_PATH = "/usr/lib/nagios/plugins"
Model.cfg_file = INPROGRESS_CFG
Model.pynag_directory = INPROGRESS_CONF_D
-reduce_RE = re.compile(r'[\W_]')
+REDUCE_RE = re.compile(r"[\W_]")
def check_ip(n):
+ """
+ Validate string is an ip address.
+
+ @param str n: string to check is an IP
+ """
try:
socket.inet_pton(socket.AF_INET, n)
return True
@@ -43,14 +48,14 @@ def check_ip(n):
return False
-def get_local_ingress_address(binding='website'):
- # using network-get to retrieve the address details if available.
- log('Getting hostname for binding %s' % binding)
+def get_local_ingress_address(binding="website"):
+ """Use network-get to retrieve the address details if available."""
+ log("Getting hostname for binding %s" % binding)
try:
network_info = network_get(binding)
- if network_info is not None and 'ingress-addresses' in network_info:
- log('Using ingress-addresses')
- hostname = network_info['ingress-addresses'][0]
+ if network_info is not None and "ingress-addresses" in network_info:
+ log("Using ingress-addresses")
+ hostname = network_info["ingress-addresses"][0]
log(hostname)
return hostname
except NotImplementedError:
@@ -60,26 +65,28 @@ def get_local_ingress_address(binding='website'):
# Pre 2.3 output
try:
hostname = network_get_primary_address(binding)
- log('Using primary-addresses')
+ log("Using primary-addresses")
except NotImplementedError:
# pre Juju 2.0
- hostname = unit_get('private-address')
- log('Using unit_get private address')
+ hostname = unit_get("private-address")
+ log("Using unit_get private address")
log(hostname)
return hostname
def get_remote_relation_attr(remote_unit, attr_name, relation_id=None):
+ """Get Remote Relation Attributes."""
args = ["relation-get", attr_name, remote_unit]
if relation_id is not None:
- args.extend(['-r', relation_id])
+ args.extend(["-r", relation_id])
return subprocess.check_output(args).strip()
def get_ip_and_hostname(remote_unit, relation_id=None):
- hostname = get_remote_relation_attr(remote_unit, 'ingress-address', relation_id)
+ """Get IP and Hostname from remote unit."""
+ hostname = get_remote_relation_attr(remote_unit, "ingress-address", relation_id)
if hostname is None or not len(hostname):
- hostname = get_remote_relation_attr(remote_unit, 'private-address', relation_id)
+ hostname = get_remote_relation_attr(remote_unit, "private-address", relation_id)
if hostname is None or not len(hostname):
log("relation-get failed")
@@ -89,18 +96,22 @@ def get_ip_and_hostname(remote_unit, relation_id=None):
ip_address = hostname
else:
ip_address = socket.getaddrinfo(hostname, None)[0][4][0]
- return (ip_address, remote_unit.replace('/', '-'))
+ return (ip_address, remote_unit.replace("/", "-"))
def refresh_hostgroups(): # noqa:C901
- """ Not the most efficient thing but since we're only
- parsing what is already on disk here its not too bad """
- hosts = [x['host_name'] for x in Model.Host.objects.all if x['host_name']]
+ """
+ Refresh Host Groups.
+
+ Not the most efficient thing but since we're only
+ parsing what is already on disk here its not too bad
+ """
+ hosts = [x["host_name"] for x in Model.Host.objects.all if x["host_name"]]
hgroups = {}
for host in hosts:
try:
- (service, unit_id) = host.rsplit('-', 1)
+ (service, unit_id) = host.rsplit("-", 1)
except ValueError:
continue
if service in hgroups:
@@ -109,8 +120,8 @@ def refresh_hostgroups(): # noqa:C901
hgroups[service] = [host]
# Find existing autogenerated
- auto_hgroups = Model.Hostgroup.objects.filter(notes__contains='#autogenerated#')
- auto_hgroups = [x.get_attribute('hostgroup_name') for x in auto_hgroups]
+ auto_hgroups = Model.Hostgroup.objects.filter(notes__contains="#autogenerated#")
+ auto_hgroups = [x.get_attribute("hostgroup_name") for x in auto_hgroups]
# Delete the ones not in hgroups
to_delete = set(auto_hgroups).difference(set(hgroups.keys()))
@@ -127,10 +138,10 @@ def refresh_hostgroups(): # noqa:C901
except (ValueError, KeyError):
hgroup = Model.Hostgroup()
hgroup.set_filename(CHARM_CFG)
- hgroup.set_attribute('hostgroup_name', hgroup_name)
- hgroup.set_attribute('notes', '#autogenerated#')
+ hgroup.set_attribute("hostgroup_name", hgroup_name)
+ hgroup.set_attribute("notes", "#autogenerated#")
- hgroup.set_attribute('members', ','.join(members))
+ hgroup.set_attribute("members", ",".join(members))
hgroup.save()
@@ -138,15 +149,14 @@ def _make_check_command(args):
args = [str(arg) for arg in args]
# There is some worry of collision, but the uniqueness of the initial
# command should be enough.
- signature = reduce_RE.sub('_', ''.join(
- [os.path.basename(arg) for arg in args]))
+ signature = REDUCE_RE.sub("_", "".join([os.path.basename(arg) for arg in args]))
Model.Command.objects.reload_cache()
try:
cmd = Model.Command.objects.get_by_shortname(signature)
except (ValueError, KeyError):
cmd = Model.Command()
- cmd.set_attribute('command_name', signature)
- cmd.set_attribute('command_line', ' '.join(args))
+ cmd.set_attribute("command_name", signature)
+ cmd.set_attribute("command_line", " ".join(args))
cmd.save()
return signature
@@ -157,165 +167,167 @@ def _extend_args(args, cmd_args, switch, value):
def customize_http(service, name, extra):
- args = []
- cmd_args = []
- plugin = os.path.join(PLUGIN_PATH, 'check_http')
- port = extra.get('port', 80)
- path = extra.get('path', '/')
+ """Customize the http check."""
+ plugin = os.path.join(PLUGIN_PATH, "check_http")
+ port = extra.get("port", 80)
+ path = extra.get("path", "/")
args = [port, path]
- cmd_args = [plugin, '-p', '"$ARG1$"', '-u', '"$ARG2$"']
- if 'status' in extra:
- _extend_args(args, cmd_args, '-e', extra['status'])
- if 'host' in extra:
- _extend_args(args, cmd_args, '-H', extra['host'])
- cmd_args.extend(('-I', '$HOSTADDRESS$'))
+ cmd_args = [plugin, "-p", '"$ARG1$"', "-u", '"$ARG2$"']
+ if "status" in extra:
+ _extend_args(args, cmd_args, "-e", extra["status"])
+ if "host" in extra:
+ _extend_args(args, cmd_args, "-H", extra["host"])
+ cmd_args.extend(("-I", "$HOSTADDRESS$"))
else:
- cmd_args.extend(('-H', '$HOSTADDRESS$'))
- check_timeout = config('check_timeout')
+ cmd_args.extend(("-H", "$HOSTADDRESS$"))
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_mysql(service, name, extra):
- plugin = os.path.join(PLUGIN_PATH, 'check_mysql')
+ """Customize the mysql check."""
+ plugin = os.path.join(PLUGIN_PATH, "check_mysql")
args = []
- cmd_args = [plugin, '-H', '$HOSTADDRESS$']
- if 'user' in extra:
- _extend_args(args, cmd_args, '-u', extra['user'])
- if 'password' in extra:
- _extend_args(args, cmd_args, '-p', extra['password'])
- check_timeout = config('check_timeout')
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$"]
+ if "user" in extra:
+ _extend_args(args, cmd_args, "-u", extra["user"])
+ if "password" in extra:
+ _extend_args(args, cmd_args, "-p", extra["password"])
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_pgsql(service, name, extra):
- plugin = os.path.join(PLUGIN_PATH, 'check_pgsql')
+ """Customize the pgsql check."""
+ plugin = os.path.join(PLUGIN_PATH, "check_pgsql")
args = []
- cmd_args = [plugin, '-H', '$HOSTADDRESS$']
- check_timeout = config('check_timeout')
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$"]
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_nrpe(service, name, extra):
- plugin = os.path.join(PLUGIN_PATH, 'check_nrpe')
+ """Customize the nrpe check."""
+ plugin = os.path.join(PLUGIN_PATH, "check_nrpe")
args = []
- cmd_args = [plugin, '-H', '$HOSTADDRESS$']
- if name in ('mem', 'swap'):
- cmd_args.extend(('-c', 'check_%s' % name))
- elif 'command' in extra:
- cmd_args.extend(('-c', extra['command']))
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$"]
+ if name in ("mem", "swap"):
+ cmd_args.extend(("-c", "check_%s" % name))
+ elif "command" in extra:
+ cmd_args.extend(("-c", extra["command"]))
else:
- cmd_args.extend(('-c', extra))
- check_timeout = config('check_timeout')
+ cmd_args.extend(("-c", extra))
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_rpc(service, name, extra):
- """ Customize the check_rpc plugin to check things like nfs."""
- plugin = os.path.join(PLUGIN_PATH, 'check_rpc')
+ """Customize the check_rpc plugin to check things like nfs."""
+ plugin = os.path.join(PLUGIN_PATH, "check_rpc")
args = []
# /usr/lib/nagios/plugins/check_rpc -H <host> -C <rpc_command>
- cmd_args = [plugin, '-H', '$HOSTADDRESS$']
- if 'rpc_command' in extra:
- cmd_args.extend(('-C', extra['rpc_command']))
- if 'program_version' in extra:
- cmd_args.extend(('-c', extra['program_version']))
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$"]
+ if "rpc_command" in extra:
+ cmd_args.extend(("-C", extra["rpc_command"]))
+ if "program_version" in extra:
+ cmd_args.extend(("-c", extra["program_version"]))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_tcp(service, name, extra):
- """ Customize tcp can be used to check things like memcached. """
- plugin = os.path.join(PLUGIN_PATH, 'check_tcp')
+ """Customize tcp can be used to check things like memcached."""
+ plugin = os.path.join(PLUGIN_PATH, "check_tcp")
args = []
# /usr/lib/nagios/plugins/check_tcp -H <host> -E
- cmd_args = [plugin, '-H', '$HOSTADDRESS$', '-E']
- if 'port' in extra:
- cmd_args.extend(('-p', extra['port']))
- if 'string' in extra:
- cmd_args.extend(('-s', "'{}'".format(extra['string'])))
- if 'expect' in extra:
- cmd_args.extend(('-e', extra['expect']))
- if 'warning' in extra:
- cmd_args.extend(('-w', extra['warning']))
- if 'critical' in extra:
- cmd_args.extend(('-c', extra['critical']))
- if 'timeout' in extra:
- cmd_args.extend(('-t', extra['timeout']))
- check_timeout = config('check_timeout')
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$", "-E"]
+ if "port" in extra:
+ cmd_args.extend(("-p", extra["port"]))
+ if "string" in extra:
+ cmd_args.extend(("-s", "'{}'".format(extra["string"])))
+ if "expect" in extra:
+ cmd_args.extend(("-e", extra["expect"]))
+ if "warning" in extra:
+ cmd_args.extend(("-w", extra["warning"]))
+ if "critical" in extra:
+ cmd_args.extend(("-c", extra["critical"]))
+ if "timeout" in extra:
+ cmd_args.extend(("-t", extra["timeout"]))
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_service(service, family, name, extra):
- """ The monitors.yaml names are mapped to methods that customize services. """
- customs = {'http': customize_http,
- 'mysql': customize_mysql,
- 'nrpe': customize_nrpe,
- 'tcp': customize_tcp,
- 'rpc': customize_rpc,
- 'pgsql': customize_pgsql,
- }
+ """Customize Service based on names in monitors.yaml."""
+ customs = {
+ "http": customize_http,
+ "mysql": customize_mysql,
+ "nrpe": customize_nrpe,
+ "tcp": customize_tcp,
+ "rpc": customize_rpc,
+ "pgsql": customize_pgsql,
+ }
if family in customs:
return customs[family](service, name, extra)
return False
def update_localhost():
- """ Update the localhost definition to use the ubuntu icons."""
-
+ """Update the localhost definition to use the ubuntu icons."""
Model.cfg_file = MAIN_NAGIOS_CFG
- Model.pynag_directory = os.path.join(MAIN_NAGIOS_DIR, 'conf.d')
- hosts = Model.Host.objects.filter(host_name='localhost',
- object_type='host')
+ Model.pynag_directory = os.path.join(MAIN_NAGIOS_DIR, "conf.d")
+ hosts = Model.Host.objects.filter(host_name="localhost", object_type="host")
for host in hosts:
- host.icon_image = 'base/ubuntu.png'
- host.icon_image_alt = 'Ubuntu Linux'
- host.vrml_image = 'ubuntu.png'
- host.statusmap_image = 'base/ubuntu.gd2'
+ host.icon_image = "base/ubuntu.png"
+ host.icon_image_alt = "Ubuntu Linux"
+ host.vrml_image = "ubuntu.png"
+ host.statusmap_image = "base/ubuntu.gd2"
host.save()
def get_pynag_host(target_id, owner_unit=None, owner_relation=None):
+ """Get Pynag Host by target_id."""
try:
host = Model.Host.objects.get_by_shortname(target_id)
except (ValueError, KeyError):
host = Model.Host()
host.set_filename(CHARM_CFG)
- host.set_attribute('host_name', target_id)
- host.set_attribute('use', 'generic-host')
+ host.set_attribute("host_name", target_id)
+ host.set_attribute("use", "generic-host")
# Adding the ubuntu icon image definitions to the host.
- host.set_attribute('icon_image', 'base/ubuntu.png')
- host.set_attribute('icon_image_alt', 'Ubuntu Linux')
- host.set_attribute('vrml_image', 'ubuntu.png')
- host.set_attribute('statusmap_image', 'base/ubuntu.gd2')
+ host.set_attribute("icon_image", "base/ubuntu.png")
+ host.set_attribute("icon_image_alt", "Ubuntu Linux")
+ host.set_attribute("vrml_image", "ubuntu.png")
+ host.set_attribute("statusmap_image", "base/ubuntu.gd2")
host.save()
host = Model.Host.objects.get_by_shortname(target_id)
apply_host_policy(target_id, owner_unit, owner_relation)
@@ -323,22 +335,23 @@ def get_pynag_host(target_id, owner_unit=None, owner_relation=None):
def get_pynag_service(target_id, service_name):
- services = Model.Service.objects.filter(host_name=target_id,
- service_description=service_name)
+ """Get Pynag service by target_id and service name."""
+ services = Model.Service.objects.filter(host_name=target_id, service_description=service_name)
if len(services) == 0:
service = Model.Service()
service.set_filename(CHARM_CFG)
- service.set_attribute('service_description', service_name)
- service.set_attribute('host_name', target_id)
- service.set_attribute('use', 'generic-service')
+ service.set_attribute("service_description", service_name)
+ service.set_attribute("host_name", target_id)
+ service.set_attribute("use", "generic-service")
else:
service = services[0]
return service
-def apply_host_policy(target_id, owner_unit, owner_relation):
- ssh_service = get_pynag_service(target_id, 'SSH')
- ssh_service.set_attribute('check_command', 'check_ssh')
+def apply_host_policy(target_id, _owner_unit, _owner_relation):
+ """Apply host policy."""
+ ssh_service = get_pynag_service(target_id, "SSH")
+ ssh_service.set_attribute("check_command", "check_ssh")
ssh_service.save()
@@ -365,6 +378,7 @@ def _commit_in_config(find_me, replacement):
def initialize_inprogress_config():
+ """Initialize In-progress config."""
if os.path.exists(INPROGRESS_DIR):
shutil.rmtree(INPROGRESS_DIR)
shutil.copytree(MAIN_NAGIOS_DIR, INPROGRESS_DIR)
@@ -374,6 +388,7 @@ def initialize_inprogress_config():
def flush_inprogress_config():
+ """Flush In-progress config."""
if not os.path.exists(INPROGRESS_DIR):
return
if os.path.exists(MAIN_NAGIOS_BAK):
diff --git a/hooks/monitors_relation_changed.py b/hooks/monitors_relation_changed.py
index 5e4f664..15ed680 100755
--- a/hooks/monitors_relation_changed.py
+++ b/hooks/monitors_relation_changed.py
@@ -1,50 +1,54 @@
#!/usr/bin/python
-# monitors-relation-changed - Process monitors.yaml into remote nagios monitors
-# Copyright Canonical 2012 Canonical Ltd. All Rights Reserved
-# Author: Clint Byrum <clint.byrum@xxxxxxxxxxxxx>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+monitors-relation-changed - Process monitors.yaml into remote nagios monitors.
+
+Copyright Canonical 2020 Canonical Ltd. All Rights Reserved
+Author: Clint Byrum <clint.byrum@xxxxxxxxxxxxx>
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
-import sys
import os
-import yaml
import re
+import sys
from collections import defaultdict
from charmhelpers.core.hookenv import (
- relation_get,
+ DEBUG,
ingress_address,
+ log,
related_units,
+ relation_get,
relation_ids,
- log,
- DEBUG
)
from common import (
customize_service,
+ flush_inprogress_config,
get_pynag_host,
get_pynag_service,
- refresh_hostgroups,
initialize_inprogress_config,
- flush_inprogress_config
+ refresh_hostgroups,
)
+import yaml
+
REQUIRED_REL_DATA_KEYS = [
- 'target-address',
- 'monitors',
- 'target-id',
+ "target-address",
+ "monitors",
+ "target-id",
]
@@ -52,20 +56,17 @@ def _prepare_relation_data(unit, rid):
relation_data = relation_get(unit=unit, rid=rid)
if not relation_data:
- msg = (
- 'no relation data found for unit {} in relation {} - '
- 'skipping'.format(unit, rid)
- )
+ msg = "no relation data found for unit {} in relation {} - " "skipping".format(unit, rid)
log(msg, level=DEBUG)
return {}
- if rid.split(':')[0] == 'nagios':
+ if rid.split(":")[0] == "nagios":
# Fake it for the more generic 'nagios' relation
- relation_data['target-id'] = unit.replace('/', '-')
- relation_data['monitors'] = {'monitors': {'remote': {}}}
+ relation_data["target-id"] = unit.replace("/", "-")
+ relation_data["monitors"] = {"monitors": {"remote": {}}}
- if not relation_data.get('target-address'):
- relation_data['target-address'] = ingress_address(unit=unit, rid=rid)
+ if not relation_data.get("target-address"):
+ relation_data["target-address"] = ingress_address(unit=unit, rid=rid)
for key in REQUIRED_REL_DATA_KEYS:
if not relation_data.get(key):
@@ -73,10 +74,7 @@ def _prepare_relation_data(unit, rid):
# the relation at first (e.g. gnocchi). After a few hook runs,
# though, they add the key. For this reason I think using a logging
# level higher than DEBUG could be misleading
- msg = (
- '{} not found for unit {} in relation {} - '
- 'skipping'.format(key, unit, rid)
- )
+ msg = "{} not found for unit {} in relation {} - " "skipping".format(key, unit, rid)
log(msg, level=DEBUG)
return {}
@@ -85,7 +83,7 @@ def _prepare_relation_data(unit, rid):
def _collect_relation_data():
all_relations = defaultdict(dict)
- for relname in ['nagios', 'monitors']:
+ for relname in ["nagios", "monitors"]:
for relid in relation_ids(relname):
for unit in related_units(relid):
relation_data = _prepare_relation_data(unit=unit, rid=relid)
@@ -96,29 +94,31 @@ def _collect_relation_data():
def main(argv): # noqa: C901
- # Note that one can pass in args positionally, 'monitors.yaml targetid
- # and target-address' so the hook can be tested without being in a hook
- # context.
- #
+ """
+ Handle monitor-relation-* hooks.
+
+ Note that one can pass in args positionally, 'monitors.yaml targetid
+ and target-address' so the hook can be tested without being in a hook
+ context.
+ """
if len(argv) > 1:
- relation_settings = {'monitors': open(argv[1]).read(),
- 'target-id': argv[2]}
+ relation_settings = {"monitors": open(argv[1]).read(), "target-id": argv[2]}
if len(argv) > 3:
- relation_settings['target-address'] = argv[3]
- all_relations = {'monitors:99': {'testing/0': relation_settings}}
+ relation_settings["target-address"] = argv[3]
+ all_relations = {"monitors:99": {"testing/0": relation_settings}}
else:
all_relations = _collect_relation_data()
# Hack to work around http://pad.lv/1025478
targets_with_addresses = set()
- for relid, units in all_relations.iteritems():
+ for relid, units in all_relations.items():
for unit, relation_settings in units.items():
- if 'target-id' in relation_settings:
- targets_with_addresses.add(relation_settings['target-id'])
+ if "target-id" in relation_settings:
+ targets_with_addresses.add(relation_settings["target-id"])
new_all_relations = {}
- for relid, units in all_relations.iteritems():
+ for relid, units in all_relations.items():
for unit, relation_settings in units.items():
- if relation_settings['target-id'] in targets_with_addresses:
+ if relation_settings["target-id"] in targets_with_addresses:
if relid not in new_all_relations:
new_all_relations[relid] = {}
new_all_relations[relid][unit] = relation_settings
@@ -128,22 +128,23 @@ def main(argv): # noqa: C901
# make a dict of machine ids to target-id hostnames
all_hosts = {}
for relid, units in all_relations.items():
- for unit, relation_settings in units.iteritems():
- machine_id = relation_settings.get('machine_id', None)
+ for unit, relation_settings in units.items():
+ machine_id = relation_settings.get("machine_id", None)
if machine_id:
- all_hosts[machine_id] = relation_settings['target-id']
+ all_hosts[machine_id] = relation_settings["target-id"]
for relid, units in all_relations.items():
apply_relation_config(relid, units, all_hosts)
refresh_hostgroups()
flush_inprogress_config()
- os.system('service nagios3 reload')
+ os.system("service nagios3 reload")
-def apply_relation_config(relid, units, all_hosts): # noqa: C901
- for unit, relation_settings in units.iteritems():
- monitors = relation_settings['monitors']
- target_id = relation_settings['target-id']
- machine_id = relation_settings.get('machine_id', None)
+def apply_relation_config(relid, units, all_hosts): # noqa: C901
+ """Apply relation config to every related unit."""
+ for unit, relation_settings in units.items():
+ monitors = relation_settings["monitors"]
+ target_id = relation_settings["target-id"]
+ machine_id = relation_settings.get("machine_id", None)
parent_host = None
if machine_id:
container_regex = re.compile(r"(\d+)/lx[cd]/\d+")
@@ -155,7 +156,7 @@ def apply_relation_config(relid, units, all_hosts): # noqa: C901
# If not set, we don't mess with it, as multiple services may feed
# monitors in for a particular address. Generally a primary will set
# this to its own private-address
- target_address = relation_settings.get('target-address', None)
+ target_address = relation_settings.get("target-address", None)
if type(monitors) != dict:
monitors = yaml.safe_load(monitors)
@@ -164,23 +165,22 @@ def apply_relation_config(relid, units, all_hosts): # noqa: C901
host = get_pynag_host(target_id)
if not target_address:
raise Exception("No Target Address provied by NRPE service!")
- host.set_attribute('address', target_address)
+ host.set_attribute("address", target_address)
if parent_host:
# We assume that we only want one parent and will overwrite any
# existing parents for this host.
- host.set_attribute('parents', parent_host)
+ host.set_attribute("parents", parent_host)
host.save()
- for mon_family, mons in monitors['monitors']['remote'].iteritems():
- for mon_name, mon in mons.iteritems():
- service_name = '%s-%s' % (target_id, mon_name)
+ for mon_family, mons in monitors["monitors"]["remote"].items():
+ for mon_name, mon in mons.items():
+ service_name = "%s-%s" % (target_id, mon_name)
service = get_pynag_service(target_id, service_name)
if customize_service(service, mon_family, mon_name, mon):
service.save()
else:
- print('Ignoring %s due to unknown family %s' % (mon_name,
- mon_family))
+ print("Ignoring %s due to unknown family %s" % (mon_name, mon_family))
-if __name__ == '__main__':
+if __name__ == "__main__":
main(sys.argv)
diff --git a/hooks/upgrade_charm.py b/hooks/upgrade_charm.py
index ea7532c..705dc4c 100755
--- a/hooks/upgrade_charm.py
+++ b/hooks/upgrade_charm.py
@@ -1,44 +1,48 @@
#!/usr/bin/env python
+"""
+Handle the upgrade-charm hooks among others.
-# Rewritten from bash to python 3/2/2014 for charm helper inclusion
-# of SSL-Everywhere!
+Rewritten from bash to python 3/2/2014 for charm helper inclusion of SSL-Everywhere!
+"""
import base64
-from jinja2 import Template
+import errno
import glob
+import grp
import os
import pwd
-import grp
-import stat
-import errno
import shutil
+import stat
import subprocess
+
+from charmhelpers import fetch
from charmhelpers.contrib import ssl
from charmhelpers.core import hookenv, host
-from charmhelpers import fetch
from common import update_localhost
+from jinja2 import Template
+
# Gather facts
-legacy_relations = hookenv.config('legacy')
-extra_config = hookenv.config('extraconfig')
-enable_livestatus = hookenv.config('enable_livestatus')
-livestatus_path = hookenv.config('livestatus_path')
-enable_pagerduty = hookenv.config('enable_pagerduty')
-pagerduty_key = hookenv.config('pagerduty_key')
-pagerduty_path = hookenv.config('pagerduty_path')
-notification_levels = hookenv.config('pagerduty_notification_levels')
-nagios_user = hookenv.config('nagios_user')
-nagios_group = hookenv.config('nagios_group')
-ssl_config = str(hookenv.config('ssl')).lower()
-charm_dir = os.environ['CHARM_DIR']
-cert_domain = hookenv.unit_get('public-address')
+legacy_relations = hookenv.config("legacy")
+extra_config = hookenv.config("extraconfig")
+enable_livestatus = hookenv.config("enable_livestatus")
+livestatus_path = hookenv.config("livestatus_path")
+enable_pagerduty = hookenv.config("enable_pagerduty")
+pagerduty_key = hookenv.config("pagerduty_key")
+pagerduty_path = hookenv.config("pagerduty_path")
+notification_levels = hookenv.config("pagerduty_notification_levels")
+nagios_user = hookenv.config("nagios_user")
+nagios_group = hookenv.config("nagios_group")
+ssl_config = str(hookenv.config("ssl")).lower()
+charm_dir = os.environ["CHARM_DIR"]
+cert_domain = hookenv.unit_get("public-address")
nagios_cfg = "/etc/nagios3/nagios.cfg"
nagios_cgi_cfg = "/etc/nagios3/cgi.cfg"
pagerduty_cfg = "/etc/nagios3/conf.d/pagerduty_nagios.cfg"
pagerduty_cron = "/etc/cron.d/nagios-pagerduty-flush"
-password = hookenv.config('password')
-ro_password = hookenv.config('ro-password')
-nagiosadmin = hookenv.config('nagiosadmin') or 'nagiosadmin'
+password = hookenv.config("password")
+ro_password = hookenv.config("ro-password")
+nagiosadmin = hookenv.config("nagiosadmin") or "nagiosadmin"
SSL_CONFIGURED = ssl_config in ["on", "only"]
HTTP_ENABLED = ssl_config not in ["only"]
@@ -46,33 +50,41 @@ HTTP_ENABLED = ssl_config not in ["only"]
def warn_legacy_relations():
"""
- Checks the charm relations for legacy relations
+ Check the charm relations for legacy relations.
+
Inserts warnings into the log about legacy relations, as they will be removed
in the future
"""
if legacy_relations is not None:
- hookenv.log("Relations have been radically changed."
- " The monitoring interface is not supported anymore.",
- "WARNING")
- hookenv.log("Please use the generic juju-info or the monitors interface",
- "WARNING")
+ hookenv.log(
+ "Relations have been radically changed." " The monitoring interface is not supported anymore.", "WARNING"
+ )
+ hookenv.log("Please use the generic juju-info or the monitors interface", "WARNING")
-# If the charm has extra configuration provided, write that to the
-# proper nagios3 configuration file, otherwise remove the config
def write_extra_config():
+ """
+ Write Extra Config.
+
+ If the charm has extra configuration provided, write that to the proper
+ nagios3 configuration file, otherwise remove the config.
+ """
# Be predjudice about this - remove the file always.
- if host.file_hash('/etc/nagios3/conf.d/extra.cfg') is not None:
- os.remove('/etc/nagios3/conf.d/extra.cfg')
+ if host.file_hash("/etc/nagios3/conf.d/extra.cfg") is not None:
+ os.remove("/etc/nagios3/conf.d/extra.cfg")
# If we have a config, then write it. the hook reconfiguration will
# handle the details
if extra_config is not None:
- host.write_file('/etc/nagios3/conf.d/extra.cfg', extra_config)
+ host.write_file("/etc/nagios3/conf.d/extra.cfg", extra_config)
-# Equivalent of mkdir -p, since we can't rely on
-# python 3.2 os.makedirs exist_ok argument
def mkdir_p(path):
+ """
+ Create directory recursively.
+
+ Equivalent of mkdir -p, since we can't rely on py32 os.makedirs
+ `exist_ok` argument
+ """
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
@@ -82,8 +94,8 @@ def mkdir_p(path):
raise
-# Fix the path to be world executable
def fixpath(path):
+ """Fix the path to be world executable."""
if os.path.isdir(path):
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IXOTH)
@@ -92,10 +104,11 @@ def fixpath(path):
def enable_livestatus_config():
+ """Enable livestatus config."""
if enable_livestatus:
hookenv.log("Livestatus is enabled")
fetch.apt_update()
- fetch.apt_install('check-mk-livestatus')
+ fetch.apt_install("check-mk-livestatus")
# Make the directory and fix perms on it
hookenv.log("Fixing perms on livestatus_path")
@@ -113,42 +126,43 @@ def enable_livestatus_config():
os.chown(livestatus_dir, uid, gid)
st = os.stat(livestatus_path)
os.chmod(livestatus_path, st.st_mode | stat.S_IRGRP)
- os.chmod(livestatus_dir, st.st_mode | stat.S_IRGRP |
- stat.S_ISGID | stat.S_IXUSR | stat.S_IXGRP)
+ os.chmod(livestatus_dir, st.st_mode | stat.S_IRGRP | stat.S_ISGID | stat.S_IXUSR | stat.S_IXGRP)
def enable_pagerduty_config():
+ """Enable pagerduty config."""
if enable_pagerduty:
hookenv.log("Pagerduty is enabled")
fetch.apt_update()
- fetch.apt_install('libhttp-parser-perl')
+ fetch.apt_install("libhttp-parser-perl")
env = os.environ
- proxy = env.get('JUJU_CHARM_HTTPS_PROXY') or env.get('https_proxy')
- proxy_switch = '--proxy {}'.format(proxy) if proxy else ''
+ proxy = env.get("JUJU_CHARM_HTTPS_PROXY") or env.get("https_proxy")
+ proxy_switch = "--proxy {}".format(proxy) if proxy else ""
# Ship the pagerduty_nagios.cfg file
- template_values = {'pagerduty_key': pagerduty_key,
- 'pagerduty_path': pagerduty_path,
- 'proxy_switch': proxy_switch,
- 'notification_levels': notification_levels}
-
- with open('hooks/templates/pagerduty_nagios_cfg.tmpl', 'r') as f:
- templateDef = f.read()
-
- t = Template(templateDef)
- with open(pagerduty_cfg, 'w') as f:
+ template_values = {
+ "pagerduty_key": pagerduty_key,
+ "pagerduty_path": pagerduty_path,
+ "proxy_switch": proxy_switch,
+ "notification_levels": notification_levels,
+ }
+
+ with open("hooks/templates/pagerduty_nagios_cfg.tmpl", "r") as f:
+ template_def = f.read()
+
+ t = Template(template_def)
+ with open(pagerduty_cfg, "w") as f:
f.write(t.render(template_values))
- with open('hooks/templates/nagios-pagerduty-flush-cron.tmpl', 'r') as f2:
- templateDef = f2.read()
+ with open("hooks/templates/nagios-pagerduty-flush-cron.tmpl", "r") as f2:
+ template_def = f2.read()
- t2 = Template(templateDef)
- with open(pagerduty_cron, 'w') as f2:
+ t2 = Template(template_def)
+ with open(pagerduty_cron, "w") as f2:
f2.write(t2.render(template_values))
# Ship the pagerduty_nagios.pl script
- shutil.copy('files/pagerduty_nagios.pl',
- '/usr/local/bin/pagerduty_nagios.pl')
+ shutil.copy("files/pagerduty_nagios.pl", "/usr/local/bin/pagerduty_nagios.pl")
# Create the pagerduty queue dir
if not os.path.isdir(pagerduty_path):
@@ -168,33 +182,18 @@ def enable_pagerduty_config():
# Multiple Email Contacts
contactgroup_members = hookenv.config("contactgroup-members")
contacts = []
- admin_email = list(
- filter(None, set(hookenv.config('admin_email').split(',')))
- )
+ admin_email = list(filter(None, set(hookenv.config("admin_email").split(","))))
if len(admin_email) == 0:
hookenv.log("admin_email is unset, this isn't valid config")
hookenv.status_set("blocked", "admin_email is not configured")
return
if len(admin_email) == 1:
hookenv.log("Setting one admin email address '%s'" % admin_email[0])
- contacts = [{
- 'contact_name': 'root',
- 'alias': 'Root',
- 'email': admin_email[0]
- }]
+ contacts = [{"contact_name": "root", "alias": "Root", "email": admin_email[0]}]
elif len(admin_email) > 1:
hookenv.log("Setting %d admin email addresses" % len(admin_email))
- contacts = [
- {
- 'contact_name': email,
- 'alias': email,
- 'email': email
- }
- for email in admin_email
- ]
- contactgroup_members = ', '.join([
- c['contact_name'] for c in contacts
- ])
+ contacts = [{"contact_name": email, "alias": email, "email": email} for email in admin_email]
+ contactgroup_members = ", ".join([c["contact_name"] for c in contacts])
# Update contacts for admin
if enable_pagerduty:
@@ -202,39 +201,43 @@ def enable_pagerduty_config():
if "pagerduty" not in contactgroup_members:
contactgroup_members += ", pagerduty"
- template_values = {'admin_service_notification_period': hookenv.config('admin_service_notification_period'),
- 'admin_host_notification_period': hookenv.config('admin_host_notification_period'),
- 'admin_service_notification_options': hookenv.config('admin_service_notification_options'),
- 'admin_host_notification_options': hookenv.config('admin_host_notification_options'),
- 'admin_service_notification_commands': hookenv.config('admin_service_notification_commands'),
- 'admin_host_notification_commands': hookenv.config('admin_host_notification_commands'),
- 'contacts': contacts,
- 'contactgroup_members': contactgroup_members}
-
- with open('hooks/templates/contacts-cfg.tmpl', 'r') as f:
- templateDef = f.read()
-
- t = Template(templateDef)
- with open('/etc/nagios3/conf.d/contacts_nagios2.cfg', 'w') as f:
+ template_values = {
+ "admin_service_notification_period": hookenv.config("admin_service_notification_period"),
+ "admin_host_notification_period": hookenv.config("admin_host_notification_period"),
+ "admin_service_notification_options": hookenv.config("admin_service_notification_options"),
+ "admin_host_notification_options": hookenv.config("admin_host_notification_options"),
+ "admin_service_notification_commands": hookenv.config("admin_service_notification_commands"),
+ "admin_host_notification_commands": hookenv.config("admin_host_notification_commands"),
+ "contacts": contacts,
+ "contactgroup_members": contactgroup_members,
+ }
+
+ with open("hooks/templates/contacts-cfg.tmpl", "r") as f:
+ template_def = f.read()
+
+ t = Template(template_def)
+ with open("/etc/nagios3/conf.d/contacts_nagios2.cfg", "w") as f:
f.write(t.render(template_values))
- host.service_reload('nagios3')
+ host.service_reload("nagios3")
# Gather local facts for SSL deployment
-deploy_key_path = os.path.join(charm_dir, 'data', '%s.key' % (cert_domain))
-deploy_cert_path = os.path.join(charm_dir, 'data', '%s.crt' % (cert_domain))
-deploy_csr_path = os.path.join(charm_dir, 'data', '%s.csr' % (cert_domain))
+deploy_key_path = os.path.join(charm_dir, "data", "%s.key" % (cert_domain))
+deploy_cert_path = os.path.join(charm_dir, "data", "%s.crt" % (cert_domain))
+deploy_csr_path = os.path.join(charm_dir, "data", "%s.csr" % (cert_domain))
# set basename for SSL key locations
-cert_file = '/etc/ssl/certs/%s.pem' % (cert_domain)
-key_file = '/etc/ssl/private/%s.key' % (cert_domain)
-chain_file = '/etc/ssl/certs/%s.csr' % (cert_domain)
+cert_file = "/etc/ssl/certs/%s.pem" % (cert_domain)
+key_file = "/etc/ssl/private/%s.key" % (cert_domain)
+chain_file = "/etc/ssl/certs/%s.csr" % (cert_domain)
-# Check for key and certificate, since the CSR is optional
-# leave it out of the dir file check and let the config manager
-# worry about it
def check_ssl_files():
+ """
+ Check for key and certificate, since the CSR is optional.
+
+ Leave it out of the dir file check and let the config manager worry about it.
+ """
key = os.path.exists(deploy_key_path)
cert = os.path.exists(deploy_cert_path)
if key is False or cert is False:
@@ -242,25 +245,24 @@ def check_ssl_files():
return True
-# Decode the SSL keys from their base64 encoded values in the configuration
def decode_ssl_keys():
- if hookenv.config('ssl_key'):
+ """Decode the SSL keys from their base64 encoded values in the config."""
+ if hookenv.config("ssl_key"):
hookenv.log("Writing key from config ssl_key: %s" % key_file)
- with open(key_file, 'w') as f:
- f.write(str(base64.b64decode(hookenv.config('ssl_key'))))
- if hookenv.config('ssl_cert'):
- with open(cert_file, 'w') as f:
- f.write(str(base64.b64decode(hookenv.config('ssl_cert'))))
- if hookenv.config('ssl_chain'):
- with open(chain_file, 'w') as f:
- f.write(str(base64.b64decode(hookenv.config('ssl_cert'))))
+ with open(key_file, "w") as f:
+ f.write(str(base64.b64decode(hookenv.config("ssl_key"))))
+ if hookenv.config("ssl_cert"):
+ with open(cert_file, "w") as f:
+ f.write(str(base64.b64decode(hookenv.config("ssl_cert"))))
+ if hookenv.config("ssl_chain"):
+ with open(chain_file, "w") as f:
+ f.write(str(base64.b64decode(hookenv.config("ssl_cert"))))
def enable_ssl():
- # Set the basename of all ssl files
-
+ """Set the basename of all ssl files."""
# Validate that we have configs, and generate a self signed certificate.
- if not hookenv.config('ssl_cert'):
+ if not hookenv.config("ssl_cert"):
# bail if keys already exist
if os.path.exists(cert_file):
hookenv.log("Keys exist, not creating keys!", "WARNING")
@@ -279,106 +281,104 @@ def nagios_bool(value):
def update_config():
- host_context = hookenv.config('nagios_host_context')
- local_host_name = 'nagios'
+ """Update Config."""
+ host_context = hookenv.config("nagios_host_context")
principal_unitname = hookenv.principal_unit()
# Fallback to using "primary" if it exists.
if principal_unitname:
local_host_name = principal_unitname
else:
- local_host_name = hookenv.local_unit().replace('/', '-')
- template_values = {'nagios_user': nagios_user,
- 'nagios_group': nagios_group,
- 'enable_livestatus': enable_livestatus,
- 'livestatus_path': livestatus_path,
- 'livestatus_args': hookenv.config('livestatus_args'),
- 'check_external_commands': hookenv.config('check_external_commands'),
- 'command_check_interval': hookenv.config('command_check_interval'),
- 'command_file': hookenv.config('command_file'),
- 'debug_file': hookenv.config('debug_file'),
- 'debug_verbosity': hookenv.config('debug_verbosity'),
- 'debug_level': hookenv.config('debug_level'),
- 'daemon_dumps_core': hookenv.config('daemon_dumps_core'),
- 'flap_detection': nagios_bool(hookenv.config('flap_detection')),
- 'admin_email': hookenv.config('admin_email'),
- 'admin_pager': hookenv.config('admin_pager'),
- 'log_rotation_method': hookenv.config('log_rotation_method'),
- 'log_archive_path': hookenv.config('log_archive_path'),
- 'use_syslog': hookenv.config('use_syslog'),
- 'monitor_self': hookenv.config('monitor_self'),
- 'nagios_hostname': "{}-{}".format(host_context, local_host_name),
- 'load_monitor': hookenv.config('load_monitor'),
- 'is_container': host.is_container(),
- 'service_check_timeout': hookenv.config('service_check_timeout'),
- 'service_check_timeout_state': hookenv.config('service_check_timeout_state'),
- }
-
- with open('hooks/templates/nagios-cfg.tmpl', 'r') as f:
- templateDef = f.read()
-
- t = Template(templateDef)
- with open(nagios_cfg, 'w') as f:
+ local_host_name = hookenv.local_unit().replace("/", "-")
+ template_values = {
+ "nagios_user": nagios_user,
+ "nagios_group": nagios_group,
+ "enable_livestatus": enable_livestatus,
+ "livestatus_path": livestatus_path,
+ "livestatus_args": hookenv.config("livestatus_args"),
+ "check_external_commands": hookenv.config("check_external_commands"),
+ "command_check_interval": hookenv.config("command_check_interval"),
+ "command_file": hookenv.config("command_file"),
+ "debug_file": hookenv.config("debug_file"),
+ "debug_verbosity": hookenv.config("debug_verbosity"),
+ "debug_level": hookenv.config("debug_level"),
+ "daemon_dumps_core": hookenv.config("daemon_dumps_core"),
+ "flap_detection": nagios_bool(hookenv.config("flap_detection")),
+ "admin_email": hookenv.config("admin_email"),
+ "admin_pager": hookenv.config("admin_pager"),
+ "log_rotation_method": hookenv.config("log_rotation_method"),
+ "log_archive_path": hookenv.config("log_archive_path"),
+ "use_syslog": hookenv.config("use_syslog"),
+ "monitor_self": hookenv.config("monitor_self"),
+ "nagios_hostname": "{}-{}".format(host_context, local_host_name),
+ "load_monitor": hookenv.config("load_monitor"),
+ "is_container": host.is_container(),
+ "service_check_timeout": hookenv.config("service_check_timeout"),
+ "service_check_timeout_state": hookenv.config("service_check_timeout_state"),
+ }
+
+ with open("hooks/templates/nagios-cfg.tmpl", "r") as f:
+ template_def = f.read()
+
+ t = Template(template_def)
+ with open(nagios_cfg, "w") as f:
f.write(t.render(template_values))
- with open('hooks/templates/localhost_nagios2.cfg.tmpl', 'r') as f:
- templateDef = f.read()
- t = Template(templateDef)
- with open('/etc/nagios3/conf.d/localhost_nagios2.cfg', 'w') as f:
+ with open("hooks/templates/localhost_nagios2.cfg.tmpl", "r") as f:
+ template_def = f.read()
+ t = Template(template_def)
+ with open("/etc/nagios3/conf.d/localhost_nagios2.cfg", "w") as f:
f.write(t.render(template_values))
- host.service_reload('nagios3')
+ host.service_reload("nagios3")
def update_cgi_config():
- template_values = {'nagiosadmin': nagiosadmin,
- 'ro_password': ro_password}
- with open('hooks/templates/nagios-cgi.tmpl', 'r') as f:
- templateDef = f.read()
+ """Update CGI Config."""
+ template_values = {"nagiosadmin": nagiosadmin, "ro_password": ro_password}
+ with open("hooks/templates/nagios-cgi.tmpl", "r") as f:
+ template_def = f.read()
- t = Template(templateDef)
- with open(nagios_cgi_cfg, 'w') as f:
+ t = Template(template_def)
+ with open(nagios_cgi_cfg, "w") as f:
f.write(t.render(template_values))
- host.service_reload('nagios3')
- host.service_reload('apache2')
+ host.service_reload("nagios3")
+ host.service_reload("apache2")
def update_apache():
"""
Nagios3 is deployed as a global apache application from the archive.
+
We'll get a little funky and add the SSL keys to the default-ssl config
which sets our keys, including the self-signed ones, as the host keyfiles.
"""
-
# Start by Setting the ports.conf
+ with open("hooks/templates/ports-cfg.jinja2", "r") as f:
+ template_def = f.read()
+ t = Template(template_def)
+ ports_conf = "/etc/apache2/ports.conf"
- with open('hooks/templates/ports-cfg.jinja2', 'r') as f:
- templateDef = f.read()
- t = Template(templateDef)
- ports_conf = '/etc/apache2/ports.conf'
-
- with open(ports_conf, 'w') as f:
- f.write(t.render({'enable_http': HTTP_ENABLED}))
+ with open(ports_conf, "w") as f:
+ f.write(t.render({"enable_http": HTTP_ENABLED}))
# Next setup the default-ssl.conf
if os.path.exists(chain_file) and os.path.getsize(chain_file) > 0:
ssl_chain = chain_file
else:
ssl_chain = None
- template_values = {'ssl_key': key_file,
- 'ssl_cert': cert_file,
- 'ssl_chain': ssl_chain}
- with open('hooks/templates/default-ssl.tmpl', 'r') as f:
- templateDef = f.read()
-
- t = Template(templateDef)
- ssl_conf = '/etc/apache2/sites-available/default-ssl.conf'
- with open(ssl_conf, 'w') as f:
+ template_values = {"ssl_key": key_file, "ssl_cert": cert_file, "ssl_chain": ssl_chain}
+ with open("hooks/templates/default-ssl.tmpl", "r") as f:
+ template_def = f.read()
+
+ t = Template(template_def)
+ ssl_conf = "/etc/apache2/sites-available/default-ssl.conf"
+ with open(ssl_conf, "w") as f:
f.write(t.render(template_values))
# Create directory for extra *.include files installed by subordinates
try:
- os.makedirs('/etc/apache2/vhost.d/')
+ os.makedirs("/etc/apache2/vhost.d/")
except OSError:
pass
@@ -386,23 +386,35 @@ def update_apache():
sites = glob.glob("/etc/apache2/sites-available/*.conf")
non_ssl = set(sites) - {ssl_conf}
for each in non_ssl:
- site = os.path.basename(each).strip('.conf')
+ site = os.path.basename(each).strip(".conf")
Apache2Site(site).action(enabled=HTTP_ENABLED)
# Configure the behavior of https site
Apache2Site("default-ssl").action(enabled=SSL_CONFIGURED)
# Finally, restart apache2
- host.service_reload('apache2')
+ host.service_reload("apache2")
class Apache2Site:
+ """Wrapper for dealing with apache2 sites."""
+
def __init__(self, site):
+ """
+ Create wrapper object based on site name.
+
+ @param str site: site to manage
+ """
self.site = site
- self.is_ssl = 'ssl' in site.lower()
+ self.is_ssl = "ssl" in site.lower()
self.port = 443 if self.is_ssl else 80
def action(self, enabled):
+ """
+ Enable or disable based on boolean.
+
+ @param bool enabled: True to enable the site, False to disable it
+ """
fn = self._enable if enabled else self._disable
return fn()
@@ -410,36 +422,33 @@ class Apache2Site:
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
- hookenv.log("Apache2Site: `{}`, returned {}, stdout:\n{}"
- .format(e.cmd, e.returncode, e.output), "ERROR")
+ hookenv.log("Apache2Site: `{}`, returned {}, stdout:\n{}".format(e.cmd, e.returncode, e.output), "ERROR")
def _enable(self):
hookenv.log("Apache2Site: Enabling %s..." % self.site, "INFO")
- self._call(['a2ensite', self.site])
+ self._call(["a2ensite", self.site])
if self.port == 443:
- self._call(['a2enmod', 'ssl'])
+ self._call(["a2enmod", "ssl"])
hookenv.open_port(self.port)
def _disable(self):
hookenv.log("Apache2Site: Disabling %s..." % self.site, "INFO")
- self._call(['a2dissite', self.site])
+ self._call(["a2dissite", self.site])
hookenv.close_port(self.port)
def update_password(account, password):
"""Update the charm and Apache's record of the password for the supplied account."""
- account_file = ''.join(['/var/lib/juju/nagios.', account, '.passwd'])
+ account_file = "".join(["/var/lib/juju/nagios.", account, ".passwd"])
if password:
- with open(account_file, 'w') as f:
+ with open(account_file, "w") as f:
f.write(password)
os.fchmod(f.fileno(), 0o0400)
- subprocess.call(['htpasswd', '-b', '/etc/nagios3/htpasswd.users',
- account, password])
+ subprocess.call(["htpasswd", "-b", "/etc/nagios3/htpasswd.users", account, password])
else:
""" password was empty, it has been removed. We should delete the account """
os.path.isfile(account_file) and os.remove(account_file)
- subprocess.call(['htpasswd', '-D', '/etc/nagios3/htpasswd.users',
- account])
+ subprocess.call(["htpasswd", "-D", "/etc/nagios3/htpasswd.users", account])
hookenv.status_set("active", "ready")
@@ -453,12 +462,12 @@ if SSL_CONFIGURED:
update_apache()
update_localhost()
update_cgi_config()
-update_password('nagiosro', ro_password)
+update_password("nagiosro", ro_password)
if password:
update_password(nagiosadmin, password)
-if nagiosadmin != 'nagiosadmin':
- update_password('nagiosadmin', False)
+if nagiosadmin != "nagiosadmin":
+ update_password("nagiosadmin", False)
-subprocess.call(['scripts/postfix_loopback_only.sh'])
-subprocess.call(['hooks/mymonitors-relation-joined'])
-subprocess.call(['hooks/monitors-relation-changed'])
+subprocess.call(["scripts/postfix_loopback_only.sh"])
+subprocess.call(["hooks/mymonitors-relation-joined"])
+subprocess.call(["hooks/monitors-relation-changed"])
diff --git a/hooks/website_relation_joined.py b/hooks/website_relation_joined.py
index 984ae80..42e5b47 100755
--- a/hooks/website_relation_joined.py
+++ b/hooks/website_relation_joined.py
@@ -1,21 +1,21 @@
#!/usr/bin/python
-# website-relation-joined - Set the hostname into remote nagios http consumers
-# Copyright Canonical 2017 Canonical Ltd. All Rights Reserved
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
+"""
+website-relation-joined - Set the hostname into remote nagios http consumers.
+
+Copyright Canonical 2020 Canonical Ltd. All Rights Reserved
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
-import common
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
from charmhelpers.core.hookenv import (
config,
@@ -23,17 +23,20 @@ from charmhelpers.core.hookenv import (
relation_set,
)
+import common
+
def main():
- relation_data = {'hostname': common.get_local_ingress_address()}
- sslcfg = config()['ssl']
- if sslcfg == 'only':
- relation_data['port'] = 443
+ """Handle website-relation-joined hook."""
+ relation_data = {"hostname": common.get_local_ingress_address()}
+ sslcfg = config()["ssl"]
+ if sslcfg == "only":
+ relation_data["port"] = 443
else:
- relation_data['port'] = 80
- log('website-relation-joined data %s' % relation_data)
+ relation_data["port"] = 80
+ log("website-relation-joined data %s" % relation_data)
relation_set(None, **relation_data)
-if __name__ == '__main__': # pragma: no cover
+if __name__ == "__main__": # pragma: no cover
main()
diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py
index fafe7f0..5b31d0b 100644
--- a/tests/functional/conftest.py
+++ b/tests/functional/conftest.py
@@ -1,5 +1,5 @@
#!/usr/bin/python3
-
+"""Configure pytest for functional testing."""
import asyncio
import json
import os
@@ -16,9 +16,9 @@ import pytest
STAT_FILE = "python3 -c \"import json; import os; s=os.stat('%s'); print(json.dumps({'uid': s.st_uid, 'gid': s.st_gid, 'mode': oct(s.st_mode), 'size': s.st_size}))\"" # noqa: E501
-@pytest.yield_fixture(scope='session')
+@pytest.yield_fixture(scope="session")
def event_loop(request):
- """Override the default pytest event loop to allow for broaded scopedv fixtures."""
+ """Override the pytest event loop to allow for broader scoped fixtures."""
loop = asyncio.get_event_loop_policy().new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(True)
@@ -27,7 +27,7 @@ def event_loop(request):
asyncio.set_event_loop(None)
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def controller():
"""Connect to the current controller."""
controller = Controller()
@@ -36,21 +36,21 @@ async def controller():
await controller.disconnect()
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def model(controller):
"""Create a model that lives only for the duration of the test."""
model_name = "functest-{}".format(uuid.uuid4())
model = await controller.add_model(model_name)
yield model
await model.disconnect()
- if os.getenv('PYTEST_KEEP_MODEL'):
+ if os.getenv("PYTEST_KEEP_MODEL"):
return
await controller.destroy_model(model_name)
while model_name in await controller.list_models():
await asyncio.sleep(1)
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def current_model():
"""Return the current model, does not create or destroy it."""
model = Model()
@@ -62,29 +62,37 @@ async def current_model():
@pytest.fixture
async def get_app(model):
"""Return the application requested."""
+ # This comment stops black style adding a blank line here, which causes flake8 D202.
+
async def _get_app(name):
try:
return model.applications[name]
except KeyError:
raise JujuError("Cannot find application {}".format(name))
+
return _get_app
@pytest.fixture
async def get_unit(model):
"""Return the requested <app_name>/<unit_number> unit."""
+ # This comment stops black style adding a blank line here, which causes flake8 D202.
+
async def _get_unit(name):
try:
- (app_name, unit_number) = name.split('/')
+ (app_name, unit_number) = name.split("/")
return model.applications[app_name].units[unit_number]
except (KeyError, ValueError):
raise JujuError("Cannot find unit {}".format(name))
+
return _get_unit
@pytest.fixture
async def get_entity(model, get_unit, get_app):
"""Return a unit or an application."""
+ # This comment stops black style adding a blank line here, which causes flake8 D202.
+
async def _get_entity(name):
try:
return await get_unit(name)
@@ -93,6 +101,7 @@ async def get_entity(model, get_unit, get_app):
return await get_app(name)
except JujuError:
raise JujuError("Cannot find entity {}".format(name))
+
return _get_entity
@@ -101,17 +110,15 @@ async def run_command(get_unit):
"""
Run a command on a unit.
- :param cmd: Command to be run
- :param target: Unit object or unit name string
+ :param get_unit: fixture defining on which unit to run the command
"""
+ # This comment stops black style adding a blank line here, which causes flake8 D202.
+
async def _run_command(cmd, target):
- unit = (
- target
- if type(target) is juju.unit.Unit
- else await get_unit(target)
- )
+ unit = target if type(target) is juju.unit.Unit else await get_unit(target)
action = await unit.run(cmd)
return action.results
+
return _run_command
@@ -120,13 +127,15 @@ async def file_stat(run_command):
"""
Run stat on a file.
- :param path: File path
- :param target: Unit object or unit name string
+ :param run_command: fixture defining how to run a command
"""
+ # This comment stops black style adding a blank line here, which causes flake8 D202.
+
async def _file_stat(path, target):
cmd = STAT_FILE % path
results = await run_command(cmd, target)
- return json.loads(results['Stdout'])
+ return json.loads(results["Stdout"])
+
return _file_stat
@@ -135,37 +144,45 @@ async def file_contents(run_command):
"""
Return the contents of a file.
- :param path: File path
- :param target: Unit object or unit name string
+ :param run_command: fixture defining how to run a command
"""
+ # This comment stops black style adding a blank line here, which causes flake8 D202.
+
async def _file_contents(path, target):
- cmd = 'cat {}'.format(path)
+ cmd = "cat {}".format(path)
results = await run_command(cmd, target)
- return results['Stdout']
+ return results["Stdout"]
+
return _file_contents
@pytest.fixture
async def reconfigure_app(get_app, model):
"""Apply a different config to the requested app."""
+ # This comment stops black style adding a blank line here, which causes flake8 D202.
+
async def _reconfigure_app(cfg, target):
- application = (
- target
- if type(target) is juju.application.Application
- else await get_app(target)
- )
+ application = target if type(target) is juju.application.Application else await get_app(target)
await application.set_config(cfg)
await application.get_config()
- await model.block_until(lambda: application.status == 'active')
+ await model.block_until(lambda: application.status == "active")
+
return _reconfigure_app
@pytest.fixture
async def create_group(run_command):
- """Create the UNIX group specified."""
+ """
+ Create the UNIX group specified.
+
+ :param run_command: fixture defining how to run a command
+ """
+ # This comment stops black style adding a blank line here, which causes flake8 D202.
+
async def _create_group(group_name, target):
cmd = "sudo groupadd %s" % group_name
await run_command(cmd, target)
+
return _create_group
@@ -183,93 +200,90 @@ SERIES = [
############
# FIXTURES #
############
-@pytest.fixture(scope='session', params=SERIES)
+@pytest.fixture(scope="session", params=SERIES)
def series(request):
"""Return ubuntu version (i.e. xenial) in use in the test."""
return request.param
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def relatives(model):
+ """Fixture providing the necessary dependant applications to test nagios."""
nrpe = "nrpe"
- nrpe_app = await model.deploy(
- 'cs:' + nrpe, application_name=nrpe,
- series='trusty', config={},
- num_units=0
- )
+ nrpe_app = await model.deploy("cs:" + nrpe, application_name=nrpe, series="trusty", config={}, num_units=0)
mysql = "mysql"
- mysql_app = await model.deploy(
- 'cs:' + mysql, application_name=mysql,
- series='trusty', config={}
- )
+ mysql_app = await model.deploy("cs:" + mysql, application_name=mysql, series="trusty", config={})
mediawiki = "mediawiki"
- mediawiki_app = await model.deploy(
- 'cs:' + mediawiki, application_name=mediawiki,
- series='trusty', config={}
- )
+ mediawiki_app = await model.deploy("cs:" + mediawiki, application_name=mediawiki, series="trusty", config={})
- await model.add_relation('mysql:db', 'mediawiki:db')
- await model.add_relation('mysql:juju-info', 'nrpe:general-info')
- await model.add_relation('mediawiki:juju-info', 'nrpe:general-info')
- await model.block_until(
- lambda: all(_.status == "active" for _ in (mysql_app, mediawiki_app))
- )
+ await model.add_relation("mysql:db", "mediawiki:db")
+ await model.add_relation("mysql:juju-info", "nrpe:general-info")
+ await model.add_relation("mediawiki:juju-info", "nrpe:general-info")
+ await model.block_until(lambda: all(_.status == "active" for _ in (mysql_app, mediawiki_app)))
yield {mediawiki: mediawiki_app, mysql: mysql_app, nrpe: nrpe_app}
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def deploy_app(relatives, model, series):
"""Return application of the charm under test."""
app_name = "nagios-{}".format(series)
"""Deploy the nagios app."""
nagios_app = await model.deploy(
- os.path.join(CHARM_BUILD_DIR, 'nagios'),
+ os.path.join(CHARM_BUILD_DIR, "nagios"),
application_name=app_name,
series=series,
- config={
- 'enable_livestatus': False,
- 'ssl': False,
- 'extraconfig': '',
- 'enable_pagerduty': False
- }
+ config={"enable_livestatus": False, "ssl": False, "extraconfig": "", "enable_pagerduty": False},
)
- await model.add_relation('{}:monitors'.format(app_name), 'mysql:monitors')
- await model.add_relation('{}:nagios'.format(app_name), 'mediawiki:juju-info')
- await model.add_relation('nrpe:monitors', '{}:monitors'.format(app_name))
+ await model.add_relation("{}:monitors".format(app_name), "mysql:monitors")
+ await model.add_relation("{}:nagios".format(app_name), "mediawiki:juju-info")
+ await model.add_relation("nrpe:monitors", "{}:monitors".format(app_name))
await model.block_until(lambda: nagios_app.status == "active")
- await model.block_until(lambda: all(
- _.status == "active"
- for _ in list(relatives.values()) + [nagios_app]
- ))
+ await model.block_until(lambda: all(_.status == "active" for _ in list(relatives.values()) + [nagios_app]))
yield nagios_app
- if os.getenv('PYTEST_KEEP_MODEL'):
+ if os.getenv("PYTEST_KEEP_MODEL"):
return
await nagios_app.destroy()
class Agent:
+ """Wraps Juju Unit and Application."""
+
def __init__(self, unit, application):
+ """
+ Create an Agent object.
+
+ @param unit: Juju unit object to wrap
+ @param application: Associated juju application
+ """
self.u = unit
self.application = application
self.model = unit.model
def is_active(self, status):
+ """Check whether the unit is currently active with a particular agent status."""
u = self.u
return u.agent_status == status and u.workload_status == "active"
async def block_until_or_timeout(self, lambda_f, **kwargs):
+ """Wait until a condition is met or the event times out."""
await self.block_until(lambda_f, ignore_timeout=True, **kwargs)
- async def block_until(self, lambda_f, timeout=120, wait_period=5,
- ignore_timeout=False):
+ async def block_until(self, lambda_f, timeout=120, wait_period=5, ignore_timeout=False):
+ """
+ Wait until a condition is met.
+
+ @param lambda_f: condition definition called with no arguments.
+ @param float timeout: how long to wait overall before raising TimeoutError
+ @param float wait_period: how long to wait between condition checks
+ @param bool ignore_timeout: if True, don't raise TimeoutError
+ """
try:
- await self.model.block_until(
- lambda_f, timeout=timeout, wait_period=wait_period
- )
+ await self.model.block_until(lambda_f, timeout=timeout, wait_period=wait_period)
except asyncio.TimeoutError:
if not ignore_timeout:
raise
@@ -279,7 +293,7 @@ class Agent:
async def unit(model, deploy_app):
"""Return the unit we've deployed."""
unit = Agent(deploy_app.units[0], deploy_app)
- await unit.block_until(lambda: unit.is_active('idle'))
+ await unit.block_until(lambda: unit.is_active("idle"))
return unit
@@ -287,4 +301,4 @@ async def unit(model, deploy_app):
async def auth(file_contents, unit):
"""Return the basic auth credentials."""
nagiospwd = await file_contents("/var/lib/juju/nagios.passwd", unit.u)
- return 'nagiosadmin', nagiospwd.strip()
+ return "nagiosadmin", nagiospwd.strip()
diff --git a/tests/functional/test_config.py b/tests/functional/test_config.py
index 2e1cab6..1d3c9c7 100644
--- a/tests/functional/test_config.py
+++ b/tests/functional/test_config.py
@@ -1,40 +1,52 @@
+"""Test configuring the juju application."""
from async_generator import asynccontextmanager
+
import pytest
+
import requests
+
pytestmark = pytest.mark.asyncio
@asynccontextmanager
async def config(unit, item, test_value, post_test):
+ """
+ Async context manager that applies a config change before a test and restores it afterwards.
+
+ @param Agent unit: Unit object whose application is configured
+ @param str item: Name of config to test
+ @param str test_value: Value of config during the test
+ @param str post_test: Value after the test completes
+ """
await unit.application.set_config({item: test_value})
await unit.block_until_or_timeout(
- lambda: unit.is_active('executing'), timeout=5,
+ lambda: unit.is_active("executing"), timeout=5,
)
- await unit.block_until(lambda: unit.is_active('idle'))
+ await unit.block_until(lambda: unit.is_active("idle"))
yield test_value
await unit.application.set_config({item: post_test})
await unit.block_until_or_timeout(
- lambda: unit.is_active('executing'), timeout=5,
+ lambda: unit.is_active("executing"), timeout=5,
)
- await unit.block_until(lambda: unit.is_active('idle'))
+ await unit.block_until(lambda: unit.is_active("idle"))
-@pytest.fixture(params=['on', 'only'])
+@pytest.fixture(params=["on", "only"])
async def ssl(unit, request):
"""
- Enable SSL before a test, then disable after test
+ Enable SSL before a test, then disable after test.
:param Agent unit: unit from the fixture
:param request: test parameters
"""
- async with config(unit, 'ssl', request.param, 'off') as value:
+ async with config(unit, "ssl", request.param, "off") as value:
yield value
@pytest.fixture
async def extra_config(unit):
"""
- Enable extraconfig for a test, and revert afterwards
+ Enable extraconfig for a test, and revert afterwards.
:param Agent unit: unit from the fixture
"""
@@ -52,33 +64,34 @@ async def extra_config(unit):
@pytest.fixture
async def livestatus_path(unit):
"""
- Enable livestatus before a test, then disable after test
+ Enable livestatus before a test, then disable after test.
:param Agent unit: unit from the fixture
"""
async with config(unit, "enable_livestatus", "true", "false"):
app_config = await unit.application.get_config()
- yield app_config['livestatus_path']['value']
+ yield app_config["livestatus_path"]["value"]
@pytest.fixture()
async def enable_pagerduty(unit):
"""
- Enable enable_pagerduty before first test, then disable after last test
+ Enable enable_pagerduty before first test, then disable after last test.
:param Agent unit: unit from the fixture
"""
async with config(unit, "enable_pagerduty", "true", "false"):
app_config = await unit.application.get_config()
- yield app_config['pagerduty_path']['value']
+ yield app_config["pagerduty_path"]["value"]
#########
# TESTS #
#########
async def test_web_interface_with_ssl(auth, unit, ssl):
+ """Test Web Interface using ssl."""
http_url = "http://%s/nagios3/" % unit.u.public_address
- if ssl == 'only':
+ if ssl == "only":
with pytest.raises(requests.ConnectionError):
requests.get(http_url, auth=auth)
else:
@@ -90,25 +103,23 @@ async def test_web_interface_with_ssl(auth, unit, ssl):
assert r.status_code == 200, "HTTPs Admin login failed"
-@pytest.mark.usefixtures('extra_config')
+@pytest.mark.usefixtures("extra_config")
async def test_extra_config(auth, unit):
- host_url = "http://%s/cgi-bin/nagios3/status.cgi?" \
- "hostgroup=all&style=hostdetail" % unit.u.public_address
+ """Test setting extra_config."""
+ host_url = "http://%s/cgi-bin/nagios3/status.cgi?" "hostgroup=all&style=hostdetail" % unit.u.public_address
r = requests.get(host_url, auth=auth)
- assert r.text.find('extra_config'), "Nagios is not monitoring extra_config"
+ assert r.text.find("extra_config"), "Nagios is not monitoring extra_config"
async def test_live_status(unit, livestatus_path, file_stat):
+ """Test setting livestatus feature."""
stat = await file_stat(livestatus_path, unit.u)
- assert stat['size'] == 0, (
- "File %s didn't match expected size" % livestatus_path
- )
+ assert stat["size"] == 0, "File %s didn't match expected size" % livestatus_path
async def test_pager_duty(unit, enable_pagerduty, file_stat):
+ """Test setting pager_duty feature."""
stat = await file_stat(enable_pagerduty, unit.u)
- assert stat['size'] != 0, (
- "Directory %s wasn't a non-zero size" % enable_pagerduty
- )
- stat = await file_stat('/etc/nagios3/conf.d/pagerduty_nagios.cfg', unit.u)
- assert stat['size'] != 0, "pagerduty_config wasn't a non-zero sized file"
+ assert stat["size"] != 0, "Directory %s wasn't a non-zero size" % enable_pagerduty
+ stat = await file_stat("/etc/nagios3/conf.d/pagerduty_nagios.cfg", unit.u)
+ assert stat["size"] != 0, "pagerduty_config wasn't a non-zero sized file"
diff --git a/tests/functional/test_deploy.py b/tests/functional/test_deploy.py
index 9a9e067..1a9d958 100644
--- a/tests/functional/test_deploy.py
+++ b/tests/functional/test_deploy.py
@@ -1,11 +1,11 @@
+"""Test Deployment and after effects based on default application config."""
import pytest
+
import requests
+
pytestmark = pytest.mark.asyncio
-#########
-# TESTS #
-#########
async def test_status(deploy_app):
"""Check that the app is in active state."""
assert deploy_app.status == "active"
@@ -22,18 +22,14 @@ async def test_web_interface_is_protected(auth, unit):
async def test_hosts_being_monitored(auth, unit):
- host_url = ("http://%s/cgi-bin/nagios3/status.cgi?"
- "hostgroup=all&style=hostdetail") % unit.u.public_address
+ """Check that nagios is monitoring related apps."""
+ host_url = ("http://%s/cgi-bin/nagios3/status.cgi?" "hostgroup=all&style=hostdetail") % unit.u.public_address
r = requests.get(host_url, auth=auth)
- assert r.text.find('mysql') and r.text.find('mediawiki'), \
- "Nagios is not monitoring the hosts it supposed to."
+ assert r.text.find("mysql") and r.text.find("mediawiki"), "Nagios is not monitoring the hosts it supposed to."
async def test_nrpe_monitors_config(relatives, unit, file_contents):
- # look for disk root check in nrpe config
- mysql_unit = relatives['mysql'].units[0]
- contents = await file_contents(
- '/etc/nagios/nrpe.d/check_disk_root.cfg',
- mysql_unit
- )
+ """Look for disk root check in nrpe config."""
+ mysql_unit = relatives["mysql"].units[0]
+ contents = await file_contents("/etc/nagios/nrpe.d/check_disk_root.cfg", mysql_unit)
assert contents, "disk root check config not found."
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index f6fceac..33450e1 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -1,5 +1,6 @@
+"""Configure pytest for unit testing."""
import os
import sys
-HOOKS = os.path.join(os.path.dirname(__file__), '..', '..', 'hooks')
+HOOKS = os.path.join(os.path.dirname(__file__), "..", "..", "hooks")
sys.path.append(HOOKS)
diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py
index 9833a02..ba9069e 100644
--- a/tests/unit/test_common.py
+++ b/tests/unit/test_common.py
@@ -1,5 +1,12 @@
+"""Testing hooks/common.py."""
import common
def test_check_ip():
+ """Validate check_ip command."""
+ # Ensure that 1.2.3.4 is a valid IPv4 Address
assert common.check_ip("1.2.3.4")
+ # Ensure that ::1 is a valid IPv6 Address
+ assert common.check_ip("::1")
+ # Ensure that 'flying-spaghetti-monster' is not an ip address
+ assert not common.check_ip("flying-spaghetti-monster")
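hooks/common.py itself is not part of this diff, so the three assertions above are the only contract visible here. Purely as a hypothetical sketch of a validator that would satisfy them, using the stdlib ipaddress module:

import ipaddress


def check_ip(value):
    """Return True if value parses as an IPv4 or IPv6 address, else False."""
    try:
        ipaddress.ip_address(value)
        return True
    except ValueError:
        return False


assert check_ip("1.2.3.4")
assert check_ip("::1")
assert not check_ip("flying-spaghetti-monster")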
diff --git a/tests/unit/test_monitor_relation_changed.py b/tests/unit/test_monitor_relation_changed.py
index 3a7f5d6..ffd7948 100644
--- a/tests/unit/test_monitor_relation_changed.py
+++ b/tests/unit/test_monitor_relation_changed.py
@@ -1,7 +1,11 @@
+"""Testing hooks/monitor_relation_changed.py."""
import monitors_relation_changed
def test_has_main():
- # THIS IS A REALLY LAME TEST -- but it's a start for where there was nothing
- # if you add tests later, please do better than me
- assert hasattr(monitors_relation_changed, 'main')
+ """
+ THIS IS A REALLY LAME TEST -- but it's a start for where there was nothing.
+
+ If you add tests later, please do better than me.
+ """
+ assert hasattr(monitors_relation_changed, "main")
diff --git a/tests/unit/test_website_relation_joined.py b/tests/unit/test_website_relation_joined.py
index 3164428..1e1a8d6 100644
--- a/tests/unit/test_website_relation_joined.py
+++ b/tests/unit/test_website_relation_joined.py
@@ -1,19 +1,18 @@
+"""Testing hooks/website_relation_joined.py."""
import unittest.mock as mock
import pytest
+
import website_relation_joined
-@mock.patch('common.get_local_ingress_address')
-@mock.patch('website_relation_joined.config')
-@mock.patch('website_relation_joined.relation_set')
-@pytest.mark.parametrize('ssl', [
- ('only', 443),
- ('on', 80),
- ('off', 80)
-], ids=['ssl=only', 'ssl=on', 'ssl=off'])
+@mock.patch("common.get_local_ingress_address")
+@mock.patch("website_relation_joined.config")
+@mock.patch("website_relation_joined.relation_set")
+@pytest.mark.parametrize("ssl", [("only", 443), ("on", 80), ("off", 80)], ids=["ssl=only", "ssl=on", "ssl=off"])
def test_main(relation_set, config, get_local_ingress_address, ssl):
- get_local_ingress_address.return_value = 'example.com'
- config.return_value = {'ssl': ssl[0]}
+ """Tests the website_relation_joined hook."""
+ get_local_ingress_address.return_value = "example.com"
+ config.return_value = {"ssl": ssl[0]}
website_relation_joined.main()
- relation_set.assert_called_with(None, port=ssl[1], hostname='example.com')
+ relation_set.assert_called_with(None, port=ssl[1], hostname="example.com")
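A note on the argument order in test_main above: stacked mock.patch decorators are applied bottom-up, so the patch closest to the function (relation_set) supplies the first mock argument, while the parametrized ssl argument is matched by name and sits after the mocks in the signature. A self-contained demonstration of that ordering, unrelated to the charm:

import os
import unittest.mock as mock


@mock.patch("os.getcwd")  # outermost patch -> last mock argument
@mock.patch("os.getpid")  # innermost patch -> first mock argument
def demo(getpid_mock, getcwd_mock):
    getpid_mock.return_value = 42
    getcwd_mock.return_value = "/tmp"
    return os.getpid(), os.getcwd()


assert demo() == (42, "/tmp")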
diff --git a/tox.ini b/tox.ini
index 34f9248..dabaa6e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -36,8 +36,16 @@ deps = -r{toxinidir}/tests/functional/requirements.txt
-r{toxinidir}/requirements.txt
[testenv:lint]
-commands = flake8
-deps = flake8
+commands =
+ flake8
+ black --check --line-length 120 --exclude /(\.eggs|\.git|\.tox|\.venv|build|dist|charmhelpers|mod)/ .
+deps =
+ black
+ flake8
+ flake8-docstrings
+ flake8-import-order
+ pep8-naming
+ flake8-colors
[flake8]
exclude =