[Merge] ~xavpaice/charm-nagios:lint-fixes into charm-nagios:master
Xav Paice has proposed merging ~xavpaice/charm-nagios:lint-fixes into charm-nagios:master.
Requested reviews:
Nagios Charm developers (nagios-charmers)
For more details, see:
https://code.launchpad.net/~xavpaice/charm-nagios/+git/nagios-charm/+merge/388441
--
Your team Nagios Charm developers is requested to review the proposed merge of ~xavpaice/charm-nagios:lint-fixes into charm-nagios:master.
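The changes below are consistent with black-style formatting: string literals normalized to double quotes, long calls wrapped one argument per line with trailing commas, and blank lines added around nested definitions. The proposal does not name the lint tooling, so the following is a sketch only, assuming black:

import black

src = "subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])\n"
print(black.format_str(src, mode=black.Mode(line_length=88)))
# -> subprocess.check_call(["apt-get", "install", "-y", "python-six"])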
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
index 61ef907..3eb716a 100644
--- a/hooks/charmhelpers/__init__.py
+++ b/hooks/charmhelpers/__init__.py
@@ -26,18 +26,18 @@ try:
import six # NOQA:F401
except ImportError:
if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+ subprocess.check_call(["apt-get", "install", "-y", "python-six"])
else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+ subprocess.check_call(["apt-get", "install", "-y", "python3-six"])
import six # NOQA:F401
try:
import yaml # NOQA:F401
except ImportError:
if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+ subprocess.check_call(["apt-get", "install", "-y", "python-yaml"])
else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+ subprocess.check_call(["apt-get", "install", "-y", "python3-yaml"])
import yaml # NOQA:F401
@@ -67,8 +67,8 @@ def deprecate(warning, date=None, log=None):
function will definitely (probably) be removed.
:param log: The log function to call to log. If not, logs to stdout
"""
+
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
try:
@@ -76,14 +76,16 @@ def deprecate(warning, date=None, log=None):
file = inspect.getsourcefile(f)
lines = inspect.getsourcelines(f)
f_name = "{}-{}-{}..{}-{}".format(
- module.__name__, file, lines[0], lines[-1], f.__name__)
+ module.__name__, file, lines[0], lines[-1], f.__name__
+ )
except (IOError, TypeError):
# assume it was local, so just use the name of the function
f_name = f.__name__
if f_name not in __deprecated_functions:
__deprecated_functions[f_name] = True
s = "DEPRECATION WARNING: Function {} is being removed".format(
- f.__name__)
+ f.__name__
+ )
if date:
s = "{} on/around {}".format(s, date)
if warning:
@@ -93,5 +95,7 @@ def deprecate(warning, date=None, log=None):
else:
print(s)
return f(*args, **kwargs)
+
return wrapped_f
+
return wrap
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index d775861..037a34a 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -127,8 +127,9 @@ class CheckException(Exception):
class Check(object):
- shortname_re = '[A-Za-z0-9-_.@]+$'
- service_template = ("""
+ shortname_re = "[A-Za-z0-9-_.@]+$"
+ service_template = (
+ """
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
@@ -136,18 +137,18 @@ define service {{
use active-service
host_name {nagios_hostname}
service_description {nagios_hostname}[{shortname}] """
- """{description}
+ """{description}
check_command check_nrpe!{command}
servicegroups {nagios_servicegroup}
}}
-""")
+"""
+ )
def __init__(self, shortname, description, check_cmd):
super(Check, self).__init__()
# XXX: could be better to calculate this from the service name
if not re.match(self.shortname_re, shortname):
- raise CheckException("shortname must match {}".format(
- Check.shortname_re))
+ raise CheckException("shortname must match {}".format(Check.shortname_re))
self.shortname = shortname
self.command = "check_{}".format(shortname)
# Note: a set of invalid characters is defined by the
@@ -157,17 +158,15 @@ define service {{
self.check_cmd = self._locate_cmd(check_cmd)
def _get_check_filename(self):
- return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
+ return os.path.join(NRPE.nrpe_confdir, "{}.cfg".format(self.command))
def _get_service_filename(self, hostname):
- return os.path.join(NRPE.nagios_exportdir,
- 'service__{}_{}.cfg'.format(hostname, self.command))
+ return os.path.join(
+ NRPE.nagios_exportdir, "service__{}_{}.cfg".format(hostname, self.command)
+ )
def _locate_cmd(self, check_cmd):
- search_path = (
- '/usr/lib/nagios/plugins',
- '/usr/local/lib/nagios/plugins',
- )
+ search_path = ("/usr/lib/nagios/plugins", "/usr/local/lib/nagios/plugins")
parts = shlex.split(check_cmd)
for path in search_path:
if os.path.exists(os.path.join(path, parts[0])):
@@ -175,14 +174,14 @@ define service {{
if len(parts) > 1:
command += " " + " ".join(parts[1:])
return command
- log('Check command not found: {}'.format(parts[0]))
- return ''
+ log("Check command not found: {}".format(parts[0]))
+ return ""
def _remove_service_files(self):
if not os.path.exists(NRPE.nagios_exportdir):
return
for f in os.listdir(NRPE.nagios_exportdir):
- if f.endswith('_{}.cfg'.format(self.command)):
+ if f.endswith("_{}.cfg".format(self.command)):
os.remove(os.path.join(NRPE.nagios_exportdir, f))
def remove(self, hostname):
@@ -193,39 +192,44 @@ define service {{
def write(self, nagios_context, hostname, nagios_servicegroups):
nrpe_check_file = self._get_check_filename()
- with open(nrpe_check_file, 'w') as nrpe_check_config:
+ with open(nrpe_check_file, "w") as nrpe_check_config:
nrpe_check_config.write("# check {}\n".format(self.shortname))
if nagios_servicegroups:
nrpe_check_config.write(
- "# The following header was added automatically by juju\n")
+ "# The following header was added automatically by juju\n"
+ )
nrpe_check_config.write(
- "# Modifying it will affect nagios monitoring and alerting\n")
+ "# Modifying it will affect nagios monitoring and alerting\n"
+ )
nrpe_check_config.write(
- "# servicegroups: {}\n".format(nagios_servicegroups))
- nrpe_check_config.write("command[{}]={}\n".format(
- self.command, self.check_cmd))
+ "# servicegroups: {}\n".format(nagios_servicegroups)
+ )
+ nrpe_check_config.write(
+ "command[{}]={}\n".format(self.command, self.check_cmd)
+ )
if not os.path.exists(NRPE.nagios_exportdir):
- log('Not writing service config as {} is not accessible'.format(
- NRPE.nagios_exportdir))
+ log(
+ "Not writing service config as {} is not accessible".format(
+ NRPE.nagios_exportdir
+ )
+ )
else:
- self.write_service_config(nagios_context, hostname,
- nagios_servicegroups)
+ self.write_service_config(nagios_context, hostname, nagios_servicegroups)
- def write_service_config(self, nagios_context, hostname,
- nagios_servicegroups):
+ def write_service_config(self, nagios_context, hostname, nagios_servicegroups):
self._remove_service_files()
templ_vars = {
- 'nagios_hostname': hostname,
- 'nagios_servicegroup': nagios_servicegroups,
- 'description': self.description,
- 'shortname': self.shortname,
- 'command': self.command,
+ "nagios_hostname": hostname,
+ "nagios_servicegroup": nagios_servicegroups,
+ "description": self.description,
+ "shortname": self.shortname,
+ "command": self.command,
}
nrpe_service_text = Check.service_template.format(**templ_vars)
nrpe_service_file = self._get_service_filename(hostname)
- with open(nrpe_service_file, 'w') as nrpe_service_config:
+ with open(nrpe_service_file, "w") as nrpe_service_config:
nrpe_service_config.write(str(nrpe_service_text))
def run(self):
@@ -233,21 +237,24 @@ define service {{
class NRPE(object):
- nagios_logdir = '/var/log/nagios'
- nagios_exportdir = '/var/lib/nagios/export'
- nrpe_confdir = '/etc/nagios/nrpe.d'
- homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
+ nagios_logdir = "/var/log/nagios"
+ nagios_exportdir = "/var/lib/nagios/export"
+ nrpe_confdir = "/etc/nagios/nrpe.d"
+ homedir = "/var/lib/nagios" # home dir provided by nagios-nrpe-server
def __init__(self, hostname=None, primary=True):
super(NRPE, self).__init__()
self.config = config()
self.primary = primary
- self.nagios_context = self.config['nagios_context']
- if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
- self.nagios_servicegroups = self.config['nagios_servicegroups']
+ self.nagios_context = self.config["nagios_context"]
+ if (
+ "nagios_servicegroups" in self.config
+ and self.config["nagios_servicegroups"]
+ ):
+ self.nagios_servicegroups = self.config["nagios_servicegroups"]
else:
self.nagios_servicegroups = self.nagios_context
- self.unit_name = local_unit().replace('/', '-')
+ self.unit_name = local_unit().replace("/", "-")
if hostname:
self.hostname = hostname
else:
@@ -258,20 +265,22 @@ class NRPE(object):
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
self.checks = []
# Iff in an nrpe-external-master relation hook, set primary status
- relation = relation_ids('nrpe-external-master')
+ relation = relation_ids("nrpe-external-master")
if relation:
log("Setting charm primary status {}".format(primary))
for rid in relation:
- relation_set(relation_id=rid, relation_settings={'primary': self.primary})
+ relation_set(
+ relation_id=rid, relation_settings={"primary": self.primary}
+ )
self.remove_check_queue = set()
def add_check(self, *args, **kwargs):
shortname = None
- if kwargs.get('shortname') is None:
+ if kwargs.get("shortname") is None:
if len(args) > 0:
shortname = args[0]
else:
- shortname = kwargs['shortname']
+ shortname = kwargs["shortname"]
self.checks.append(Check(*args, **kwargs))
try:
@@ -280,26 +289,26 @@ class NRPE(object):
pass
def remove_check(self, *args, **kwargs):
- if kwargs.get('shortname') is None:
- raise ValueError('shortname of check must be specified')
+ if kwargs.get("shortname") is None:
+ raise ValueError("shortname of check must be specified")
# Use sensible defaults if they're not specified - these are not
# actually used during removal, but they're required for constructing
# the Check object; check_disk is chosen because it's part of the
# nagios-plugins-basic package.
- if kwargs.get('check_cmd') is None:
- kwargs['check_cmd'] = 'check_disk'
- if kwargs.get('description') is None:
- kwargs['description'] = ''
+ if kwargs.get("check_cmd") is None:
+ kwargs["check_cmd"] = "check_disk"
+ if kwargs.get("description") is None:
+ kwargs["description"] = ""
check = Check(*args, **kwargs)
check.remove(self.hostname)
- self.remove_check_queue.add(kwargs['shortname'])
+ self.remove_check_queue.add(kwargs["shortname"])
def write(self):
try:
- nagios_uid = pwd.getpwnam('nagios').pw_uid
- nagios_gid = grp.getgrnam('nagios').gr_gid
+ nagios_uid = pwd.getpwnam("nagios").pw_uid
+ nagios_gid = grp.getgrnam("nagios").gr_gid
except Exception:
log("Nagios user not set up, nrpe checks not updated")
return
@@ -311,33 +320,36 @@ class NRPE(object):
nrpe_monitors = {}
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
for nrpecheck in self.checks:
- nrpecheck.write(self.nagios_context, self.hostname,
- self.nagios_servicegroups)
- nrpe_monitors[nrpecheck.shortname] = {
- "command": nrpecheck.command,
- }
+ nrpecheck.write(
+ self.nagios_context, self.hostname, self.nagios_servicegroups
+ )
+ nrpe_monitors[nrpecheck.shortname] = {"command": nrpecheck.command}
# update-status hooks are configured to firing every 5 minutes by
# default. When nagios-nrpe-server is restarted, the nagios server
# reports checks failing causing unnecessary alerts. Let's not restart
# on update-status hooks.
- if not hook_name() == 'update-status':
- service('restart', 'nagios-nrpe-server')
+ if not hook_name() == "update-status":
+ service("restart", "nagios-nrpe-server")
- monitor_ids = relation_ids("local-monitors") + \
- relation_ids("nrpe-external-master")
+ monitor_ids = relation_ids("local-monitors") + relation_ids(
+ "nrpe-external-master"
+ )
for rid in monitor_ids:
reldata = relation_get(unit=local_unit(), rid=rid)
- if 'monitors' in reldata:
+ if "monitors" in reldata:
# update the existing set of monitors with the new data
- old_monitors = yaml.safe_load(reldata['monitors'])
- old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
+ old_monitors = yaml.safe_load(reldata["monitors"])
+ old_nrpe_monitors = old_monitors["monitors"]["remote"]["nrpe"]
# remove keys that are in the remove_check_queue
- old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
- if k not in self.remove_check_queue}
+ old_nrpe_monitors = {
+ k: v
+ for k, v in old_nrpe_monitors.items()
+ if k not in self.remove_check_queue
+ }
# update/add nrpe_monitors
old_nrpe_monitors.update(nrpe_monitors)
- old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
+ old_monitors["monitors"]["remote"]["nrpe"] = old_nrpe_monitors
# write back to the relation
relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
else:
@@ -347,29 +359,29 @@ class NRPE(object):
self.remove_check_queue.clear()
-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+def get_nagios_hostcontext(relation_name="nrpe-external-master"):
"""
Query relation with nrpe subordinate, return the nagios_host_context
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
- if 'nagios_host_context' in rel:
- return rel['nagios_host_context']
+ if "nagios_host_context" in rel:
+ return rel["nagios_host_context"]
-def get_nagios_hostname(relation_name='nrpe-external-master'):
+def get_nagios_hostname(relation_name="nrpe-external-master"):
"""
Query relation with nrpe subordinate, return the nagios_hostname
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
- if 'nagios_hostname' in rel:
- return rel['nagios_hostname']
+ if "nagios_hostname" in rel:
+ return rel["nagios_hostname"]
-def get_nagios_unit_name(relation_name='nrpe-external-master'):
+def get_nagios_unit_name(relation_name="nrpe-external-master"):
"""
Return the nagios unit name prepended with host_context if needed
@@ -394,49 +406,45 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
"""
for svc in services:
# Don't add a check for these services from neutron-gateway
- if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+ if svc in ["ext-port", "os-charm-phy-nic-mtu"]:
next
- upstart_init = '/etc/init/%s.conf' % svc
- sysv_init = '/etc/init.d/%s' % svc
+ upstart_init = "/etc/init/%s.conf" % svc
+ sysv_init = "/etc/init.d/%s" % svc
if host.init_is_systemd():
nrpe.add_check(
shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_systemd.py %s' % svc
+ description="process check {%s}" % unit_name,
+ check_cmd="check_systemd.py %s" % svc,
)
elif os.path.exists(upstart_init):
nrpe.add_check(
shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_upstart_job %s' % svc
+ description="process check {%s}" % unit_name,
+ check_cmd="check_upstart_job %s" % svc,
)
elif os.path.exists(sysv_init):
- cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
- checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
+ cronpath = "/etc/cron.d/nagios-service-check-%s" % svc
+ checkpath = "%s/service-check-%s.txt" % (nrpe.homedir, svc)
croncmd = (
- '/usr/local/lib/nagios/plugins/check_exit_status.pl '
- '-e -s /etc/init.d/%s status' % svc
+ "/usr/local/lib/nagios/plugins/check_exit_status.pl "
+ "-e -s /etc/init.d/%s status" % svc
)
- cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
- f = open(cronpath, 'w')
+ cron_file = "*/5 * * * * root %s > %s\n" % (croncmd, checkpath)
+ f = open(cronpath, "w")
f.write(cron_file)
f.close()
nrpe.add_check(
shortname=svc,
- description='service check {%s}' % unit_name,
- check_cmd='check_status_file.py -f %s' % checkpath,
+ description="service check {%s}" % unit_name,
+ check_cmd="check_status_file.py -f %s" % checkpath,
)
# if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
# (LP: #1670223).
if immediate_check and os.path.isdir(nrpe.homedir):
- f = open(checkpath, 'w')
- subprocess.call(
- croncmd.split(),
- stdout=f,
- stderr=subprocess.STDOUT
- )
+ f = open(checkpath, "w")
+ subprocess.call(croncmd.split(), stdout=f, stderr=subprocess.STDOUT)
f.close()
os.chmod(checkpath, 0o644)
@@ -446,17 +454,20 @@ def copy_nrpe_checks(nrpe_files_dir=None):
Copy the nrpe checks into place
"""
- NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
+ NAGIOS_PLUGINS = "/usr/local/lib/nagios/plugins"
if nrpe_files_dir is None:
# determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
- for segment in ['.', 'hooks']:
- nrpe_files_dir = os.path.abspath(os.path.join(
- os.getenv('CHARM_DIR'),
- segment,
- 'charmhelpers',
- 'contrib',
- 'openstack',
- 'files'))
+ for segment in [".", "hooks"]:
+ nrpe_files_dir = os.path.abspath(
+ os.path.join(
+ os.getenv("CHARM_DIR"),
+ segment,
+ "charmhelpers",
+ "contrib",
+ "openstack",
+ "files",
+ )
+ )
if os.path.isdir(nrpe_files_dir):
break
else:
@@ -465,8 +476,7 @@ def copy_nrpe_checks(nrpe_files_dir=None):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
if os.path.isfile(fname):
- shutil.copy2(fname,
- os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
+ shutil.copy2(fname, os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
def add_haproxy_checks(nrpe, unit_name):
@@ -477,13 +487,15 @@ def add_haproxy_checks(nrpe, unit_name):
:param str unit_name: Unit name to use in check description
"""
nrpe.add_check(
- shortname='haproxy_servers',
- description='Check HAProxy {%s}' % unit_name,
- check_cmd='check_haproxy.sh')
+ shortname="haproxy_servers",
+ description="Check HAProxy {%s}" % unit_name,
+ check_cmd="check_haproxy.sh",
+ )
nrpe.add_check(
- shortname='haproxy_queue',
- description='Check HAProxy queue depth {%s}' % unit_name,
- check_cmd='check_haproxy_queue_depth.sh')
+ shortname="haproxy_queue",
+ description="Check HAProxy queue depth {%s}" % unit_name,
+ check_cmd="check_haproxy_queue_depth.sh",
+ )
def remove_deprecated_check(nrpe, deprecated_services):
@@ -496,5 +508,5 @@ def remove_deprecated_check(nrpe, deprecated_services):
:type deprecated_services: list
"""
for dep_svc in deprecated_services:
- log('Deprecated service: {}'.format(dep_svc))
+ log("Deprecated service: {}".format(dep_svc))
nrpe.remove_check(shortname=dep_svc)
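As a reviewer aside, a minimal sketch of how a charm typically drives the NRPE helper reformatted above, assuming it runs in a hook context; the shortname and check command are illustrative, not taken from this charm:

from charmhelpers.contrib.charmsupport import nrpe

hostname = nrpe.get_nagios_hostname()  # from the nrpe-external-master relation
checker = nrpe.NRPE(hostname=hostname)
checker.add_check(
    shortname="ntpd",
    description="process check {nagios-0}",
    check_cmd="check_procs -c 1: -C ntpd",
)
checker.write()  # writes nrpe.d fragments and updates monitor relations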
diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py
index 7ea43f0..723f898 100644
--- a/hooks/charmhelpers/contrib/charmsupport/volumes.py
+++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-'''
+"""
Functions for managing volumes in juju units. One volume is supported per unit.
Subordinates may have their own storage, provided it is on its own partition.
@@ -51,7 +51,7 @@ Usage::
except VolumeConfigurationError:
log('Storage could not be configured', ERROR)
-'''
+"""
# XXX: Known limitations
# - fstab is neither consulted nor updated
@@ -62,54 +62,60 @@ from charmhelpers.core import host
import yaml
-MOUNT_BASE = '/srv/juju/volumes'
+MOUNT_BASE = "/srv/juju/volumes"
class VolumeConfigurationError(Exception):
- '''Volume configuration data is missing or invalid'''
+ """Volume configuration data is missing or invalid"""
+
pass
def get_config():
- '''Gather and sanity-check volume configuration data'''
+ """Gather and sanity-check volume configuration data"""
volume_config = {}
config = hookenv.config()
errors = False
- if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
- volume_config['ephemeral'] = True
+ if config.get("volume-ephemeral") in (True, "True", "true", "Yes", "yes"):
+ volume_config["ephemeral"] = True
else:
- volume_config['ephemeral'] = False
+ volume_config["ephemeral"] = False
try:
- volume_map = yaml.safe_load(config.get('volume-map', '{}'))
+ volume_map = yaml.safe_load(config.get("volume-map", "{}"))
except yaml.YAMLError as e:
- hookenv.log("Error parsing YAML volume-map: {}".format(e),
- hookenv.ERROR)
+ hookenv.log("Error parsing YAML volume-map: {}".format(e), hookenv.ERROR)
errors = True
if volume_map is None:
# probably an empty string
volume_map = {}
elif not isinstance(volume_map, dict):
- hookenv.log("Volume-map should be a dictionary, not {}".format(
- type(volume_map)))
+ hookenv.log(
+ "Volume-map should be a dictionary, not {}".format(type(volume_map))
+ )
errors = True
- volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
- if volume_config['device'] and volume_config['ephemeral']:
+ volume_config["device"] = volume_map.get(os.environ["JUJU_UNIT_NAME"])
+ if volume_config["device"] and volume_config["ephemeral"]:
# asked for ephemeral storage but also defined a volume ID
- hookenv.log('A volume is defined for this unit, but ephemeral '
- 'storage was requested', hookenv.ERROR)
+ hookenv.log(
+ "A volume is defined for this unit, but ephemeral " "storage was requested",
+ hookenv.ERROR,
+ )
errors = True
- elif not volume_config['device'] and not volume_config['ephemeral']:
+ elif not volume_config["device"] and not volume_config["ephemeral"]:
# asked for permanent storage but did not define volume ID
- hookenv.log('Ephemeral storage was requested, but there is no volume '
- 'defined for this unit.', hookenv.ERROR)
+ hookenv.log(
+ "Ephemeral storage was requested, but there is no volume "
+ "defined for this unit.",
+ hookenv.ERROR,
+ )
errors = True
- unit_mount_name = hookenv.local_unit().replace('/', '-')
- volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
+ unit_mount_name = hookenv.local_unit().replace("/", "-")
+ volume_config["mountpoint"] = os.path.join(MOUNT_BASE, unit_mount_name)
if errors:
return None
@@ -117,51 +123,51 @@ def get_config():
def mount_volume(config):
- if os.path.exists(config['mountpoint']):
- if not os.path.isdir(config['mountpoint']):
- hookenv.log('Not a directory: {}'.format(config['mountpoint']))
+ if os.path.exists(config["mountpoint"]):
+ if not os.path.isdir(config["mountpoint"]):
+ hookenv.log("Not a directory: {}".format(config["mountpoint"]))
raise VolumeConfigurationError()
else:
- host.mkdir(config['mountpoint'])
- if os.path.ismount(config['mountpoint']):
+ host.mkdir(config["mountpoint"])
+ if os.path.ismount(config["mountpoint"]):
unmount_volume(config)
- if not host.mount(config['device'], config['mountpoint'], persist=True):
+ if not host.mount(config["device"], config["mountpoint"], persist=True):
raise VolumeConfigurationError()
def unmount_volume(config):
- if os.path.ismount(config['mountpoint']):
- if not host.umount(config['mountpoint'], persist=True):
+ if os.path.ismount(config["mountpoint"]):
+ if not host.umount(config["mountpoint"], persist=True):
raise VolumeConfigurationError()
def managed_mounts():
- '''List of all mounted managed volumes'''
+ """List of all mounted managed volumes"""
return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
def configure_volume(before_change=lambda: None, after_change=lambda: None):
- '''Set up storage (or don't) according to the charm's volume configuration.
+ """Set up storage (or don't) according to the charm's volume configuration.
Returns the mount point or "ephemeral". before_change and after_change
are optional functions to be called if the volume configuration changes.
- '''
+ """
config = get_config()
if not config:
- hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
+ hookenv.log("Failed to read volume configuration", hookenv.CRITICAL)
raise VolumeConfigurationError()
- if config['ephemeral']:
- if os.path.ismount(config['mountpoint']):
+ if config["ephemeral"]:
+ if os.path.ismount(config["mountpoint"]):
before_change()
unmount_volume(config)
after_change()
- return 'ephemeral'
+ return "ephemeral"
else:
# persistent storage
- if os.path.ismount(config['mountpoint']):
+ if os.path.ismount(config["mountpoint"]):
mounts = dict(managed_mounts())
- if mounts.get(config['mountpoint']) != config['device']:
+ if mounts.get(config["mountpoint"]) != config["device"]:
before_change()
unmount_volume(config)
mount_volume(config)
@@ -170,4 +176,4 @@ def configure_volume(before_change=lambda: None, after_change=lambda: None):
before_change()
mount_volume(config)
after_change()
- return config['mountpoint']
+ return config["mountpoint"]
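For completeness, a sketch of the entry point above, assuming it runs in a hook with volume-map configured; the no-op callbacks are illustrative:

from charmhelpers.contrib.charmsupport import volumes

# Returns the mountpoint path, or the string "ephemeral" when the config
# asks for ephemeral storage; raises VolumeConfigurationError on bad config.
mountpoint = volumes.configure_volume(
    before_change=lambda: None, after_change=lambda: None
)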
diff --git a/hooks/charmhelpers/contrib/ssl/__init__.py b/hooks/charmhelpers/contrib/ssl/__init__.py
index 1d238b5..847ff14 100644
--- a/hooks/charmhelpers/contrib/ssl/__init__.py
+++ b/hooks/charmhelpers/contrib/ssl/__init__.py
@@ -16,7 +16,9 @@ import subprocess
from charmhelpers.core import hookenv
-def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
+def generate_selfsigned(
+ keyfile, certfile, keysize="1024", config=None, subject=None, cn=None
+):
"""Generate selfsigned SSL keypair
You must provide one of the 3 optional arguments:
@@ -45,10 +47,23 @@ def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=
cmd = []
if config:
- cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
- "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
- "-keyout", keyfile,
- "-out", certfile, "-config", config]
+ cmd = [
+ "/usr/bin/openssl",
+ "req",
+ "-new",
+ "-newkey",
+ "rsa:{}".format(keysize),
+ "-days",
+ "365",
+ "-nodes",
+ "-x509",
+ "-keyout",
+ keyfile,
+ "-out",
+ certfile,
+ "-config",
+ config,
+ ]
elif subject:
ssl_subject = ""
if "country" in subject:
@@ -64,25 +79,55 @@ def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=
if "cn" in subject:
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
else:
- hookenv.log("When using \"subject\" argument you must "
- "provide \"cn\" field at very least")
+ hookenv.log(
+ 'When using "subject" argument you must '
+ 'provide "cn" field at very least'
+ )
return False
if "email" in subject:
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
- cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
- "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
- "-keyout", keyfile,
- "-out", certfile, "-subj", ssl_subject]
+ cmd = [
+ "/usr/bin/openssl",
+ "req",
+ "-new",
+ "-newkey",
+ "rsa:{}".format(keysize),
+ "-days",
+ "365",
+ "-nodes",
+ "-x509",
+ "-keyout",
+ keyfile,
+ "-out",
+ certfile,
+ "-subj",
+ ssl_subject,
+ ]
elif cn:
- cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
- "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
- "-keyout", keyfile,
- "-out", certfile, "-subj", "/CN={}".format(cn)]
+ cmd = [
+ "/usr/bin/openssl",
+ "req",
+ "-new",
+ "-newkey",
+ "rsa:{}".format(keysize),
+ "-days",
+ "365",
+ "-nodes",
+ "-x509",
+ "-keyout",
+ keyfile,
+ "-out",
+ certfile,
+ "-subj",
+ "/CN={}".format(cn),
+ ]
if not cmd:
- hookenv.log("No config, subject or cn provided,"
- "unable to generate self signed SSL certificates")
+ hookenv.log(
+ "No config, subject or cn provided,"
+ "unable to generate self signed SSL certificates"
+ )
return False
try:
subprocess.check_call(cmd)
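For reference, a sketch of calling the helper whose signature was rewrapped above; the paths and CN are illustrative:

from charmhelpers.contrib.ssl import generate_selfsigned

generate_selfsigned(
    "/etc/ssl/private/unit.key",
    "/etc/ssl/certs/unit.crt",
    keysize="2048",
    cn="nagios.example.com",
)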
diff --git a/hooks/charmhelpers/contrib/ssl/service.py b/hooks/charmhelpers/contrib/ssl/service.py
index 06b534f..331463f 100644
--- a/hooks/charmhelpers/contrib/ssl/service.py
+++ b/hooks/charmhelpers/contrib/ssl/service.py
@@ -40,15 +40,15 @@ class ServiceCA(object):
# Hook Helper API
@staticmethod
def get_ca(type=STD_CERT):
- service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
- ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
+ service_name = os.environ["JUJU_UNIT_NAME"].split("/")[0]
+ ca_path = os.path.join(os.environ["CHARM_DIR"], "ca")
ca = ServiceCA(service_name, ca_path, type)
ca.init()
return ca
@classmethod
def get_service_cert(cls, type=STD_CERT):
- service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
+ service_name = os.environ["JUJU_UNIT_NAME"].split("/")[0]
ca = cls.get_ca()
crt, key = ca.get_or_create_cert(service_name)
return crt, key, ca.get_ca_bundle()
@@ -63,55 +63,66 @@ class ServiceCA(object):
@property
def ca_key(self):
- return path_join(self.ca_dir, 'private', 'cacert.key')
+ return path_join(self.ca_dir, "private", "cacert.key")
@property
def ca_cert(self):
- return path_join(self.ca_dir, 'cacert.pem')
+ return path_join(self.ca_dir, "cacert.pem")
@property
def ca_conf(self):
- return path_join(self.ca_dir, 'ca.cnf')
+ return path_join(self.ca_dir, "ca.cnf")
@property
def signing_conf(self):
- return path_join(self.ca_dir, 'signing.cnf')
+ return path_join(self.ca_dir, "signing.cnf")
def _init_ca_dir(self, ca_dir):
os.mkdir(ca_dir)
- for i in ['certs', 'crl', 'newcerts', 'private']:
+ for i in ["certs", "crl", "newcerts", "private"]:
sd = path_join(ca_dir, i)
if not exists(sd):
os.mkdir(sd)
- if not exists(path_join(ca_dir, 'serial')):
- with open(path_join(ca_dir, 'serial'), 'w') as fh:
- fh.write('02\n')
+ if not exists(path_join(ca_dir, "serial")):
+ with open(path_join(ca_dir, "serial"), "w") as fh:
+ fh.write("02\n")
- if not exists(path_join(ca_dir, 'index.txt')):
- with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
- fh.write('')
+ if not exists(path_join(ca_dir, "index.txt")):
+ with open(path_join(ca_dir, "index.txt"), "w") as fh:
+ fh.write("")
def _init_ca(self):
"""Generate the root ca's cert and key.
"""
- if not exists(path_join(self.ca_dir, 'ca.cnf')):
- with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
- fh.write(
- CA_CONF_TEMPLATE % (self.get_conf_variables()))
+ if not exists(path_join(self.ca_dir, "ca.cnf")):
+ with open(path_join(self.ca_dir, "ca.cnf"), "w") as fh:
+ fh.write(CA_CONF_TEMPLATE % (self.get_conf_variables()))
- if not exists(path_join(self.ca_dir, 'signing.cnf')):
- with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
- fh.write(
- SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
+ if not exists(path_join(self.ca_dir, "signing.cnf")):
+ with open(path_join(self.ca_dir, "signing.cnf"), "w") as fh:
+ fh.write(SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
if exists(self.ca_cert) or exists(self.ca_key):
raise RuntimeError("Initialized called when CA already exists")
- cmd = ['openssl', 'req', '-config', self.ca_conf,
- '-x509', '-nodes', '-newkey', 'rsa',
- '-days', self.default_ca_expiry,
- '-keyout', self.ca_key, '-out', self.ca_cert,
- '-outform', 'PEM']
+ cmd = [
+ "openssl",
+ "req",
+ "-config",
+ self.ca_conf,
+ "-x509",
+ "-nodes",
+ "-newkey",
+ "rsa",
+ "-days",
+ self.default_ca_expiry,
+ "-keyout",
+ self.ca_key,
+ "-out",
+ self.ca_cert,
+ "-outform",
+ "PEM",
+ ]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
log("CA Init:\n %s" % output, level=DEBUG)
@@ -120,7 +131,8 @@ class ServiceCA(object):
org_name="juju",
org_unit_name="%s service" % self.name,
common_name=self.name,
- ca_dir=self.ca_dir)
+ ca_dir=self.ca_dir,
+ )
def get_or_create_cert(self, common_name):
if common_name in self:
@@ -153,29 +165,70 @@ class ServiceCA(object):
def _create_certificate(self, common_name, key_p, csr_p, crt_p):
template_vars = self.get_conf_variables()
- template_vars['common_name'] = common_name
- subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
- template_vars)
+ template_vars["common_name"] = common_name
+ subj = "/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s" % (
+ template_vars
+ )
log("CA Create Cert %s" % common_name, level=DEBUG)
- cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
- '-nodes', '-days', self.default_expiry,
- '-keyout', key_p, '-out', csr_p, '-subj', subj]
+ cmd = [
+ "openssl",
+ "req",
+ "-sha1",
+ "-newkey",
+ "rsa:2048",
+ "-nodes",
+ "-days",
+ self.default_expiry,
+ "-keyout",
+ key_p,
+ "-out",
+ csr_p,
+ "-subj",
+ subj,
+ ]
subprocess.check_call(cmd, stderr=subprocess.PIPE)
- cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
+ cmd = ["openssl", "rsa", "-in", key_p, "-out", key_p]
subprocess.check_call(cmd, stderr=subprocess.PIPE)
log("CA Sign Cert %s" % common_name, level=DEBUG)
if self.cert_type == MYSQL_CERT:
- cmd = ['openssl', 'x509', '-req',
- '-in', csr_p, '-days', self.default_expiry,
- '-CA', self.ca_cert, '-CAkey', self.ca_key,
- '-set_serial', '01', '-out', crt_p]
+ cmd = [
+ "openssl",
+ "x509",
+ "-req",
+ "-in",
+ csr_p,
+ "-days",
+ self.default_expiry,
+ "-CA",
+ self.ca_cert,
+ "-CAkey",
+ self.ca_key,
+ "-set_serial",
+ "01",
+ "-out",
+ crt_p,
+ ]
else:
- cmd = ['openssl', 'ca', '-config', self.signing_conf,
- '-extensions', 'req_extensions',
- '-days', self.default_expiry, '-notext',
- '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
+ cmd = [
+ "openssl",
+ "ca",
+ "-config",
+ self.signing_conf,
+ "-extensions",
+ "req_extensions",
+ "-days",
+ self.default_expiry,
+ "-notext",
+ "-in",
+ csr_p,
+ "-out",
+ crt_p,
+ "-subj",
+ subj,
+ "-batch",
+ ]
log("running %s" % " ".join(cmd), level=DEBUG)
subprocess.check_call(cmd, stderr=subprocess.PIPE)
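A sketch of the public entry point of this class, assuming the Juju hook environment (JUJU_UNIT_NAME, CHARM_DIR) is set:

from charmhelpers.contrib.ssl.service import ServiceCA

# Initializes a CA under $CHARM_DIR/ca on first use, then issues (or
# returns) a certificate named after the service.
crt, key, ca_bundle = ServiceCA.get_service_cert()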
diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py
index 6ad41ee..a64805e 100644
--- a/hooks/charmhelpers/core/decorators.py
+++ b/hooks/charmhelpers/core/decorators.py
@@ -21,16 +21,14 @@
import time
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
+from charmhelpers.core.hookenv import log, INFO
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
"""If the decorated function raises exception exc_type, allow num_retries
retry attempts before raise the exception.
"""
+
def _retry_on_exception_inner_1(f):
def _retry_on_exception_inner_2(*args, **kwargs):
retries = num_retries
@@ -44,8 +42,11 @@ def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
delay = base_delay * multiplier
multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
+ log(
+ "Retrying '%s' %d more times (delay=%s)"
+ % (f.__name__, retries, delay),
+ level=INFO,
+ )
retries -= 1
if delay:
time.sleep(delay)
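For context, the decorator reformatted above in use; the retry count and exception type are illustrative:

from charmhelpers.core.decorators import retry_on_exception

# Retries up to 3 times with a linearly growing delay between attempts
# (delay = base_delay * multiplier, per the code above).
@retry_on_exception(3, base_delay=5, exc_type=IOError)
def fetch_remote_resource():
    ...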
diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py
index fdd82b7..347816e 100644
--- a/hooks/charmhelpers/core/files.py
+++ b/hooks/charmhelpers/core/files.py
@@ -15,13 +15,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-__author__ = 'Jorge Niedbalski <niedbalski@xxxxxxxxxx>'
+__author__ = "Jorge Niedbalski <niedbalski@xxxxxxxxxx>"
import os
import subprocess
-def sed(filename, before, after, flags='g'):
+def sed(filename, before, after, flags="g"):
"""
Search and replaces the given pattern on filename.
@@ -35,9 +35,8 @@ def sed(filename, before, after, flags='g'):
:returns: If the sed command exit code was zero then return,
otherwise raise CalledProcessError.
"""
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
+ expression = r"s/{0}/{1}/{2}".format(before, after, flags)
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
+ return subprocess.check_call(
+ ["sed", "-i", "-r", "-e", expression, os.path.expanduser(filename)]
+ )
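For context, the helper above in use; the file and pattern are illustrative:

from charmhelpers.core.files import sed

# Builds and runs: sed -i -r -e s/^allowed_hosts=.*/allowed_hosts=10.0.0.1/g
sed("/etc/nagios/nrpe.cfg", "^allowed_hosts=.*", "allowed_hosts=10.0.0.1")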
diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py
index d9fa915..db4b990 100644
--- a/hooks/charmhelpers/core/fstab.py
+++ b/hooks/charmhelpers/core/fstab.py
@@ -18,7 +18,7 @@
import io
import os
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@xxxxxxxxxxxxx>'
+__author__ = "Jorge Niedbalski R. <jorge.niedbalski@xxxxxxxxxxxxx>"
class Fstab(io.FileIO):
@@ -29,8 +29,8 @@ class Fstab(io.FileIO):
class Entry(object):
"""Entry class represents a non-comment line on the `/etc/fstab` file
"""
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
+
+ def __init__(self, device, mountpoint, filesystem, options, d=0, p=0):
self.device = device
self.mountpoint = mountpoint
self.filesystem = filesystem
@@ -46,34 +46,36 @@ class Fstab(io.FileIO):
return str(self) == str(o)
def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
+ return "{} {} {} {} {} {}".format(
+ self.device,
+ self.mountpoint,
+ self.filesystem,
+ self.options,
+ self.d,
+ self.p,
+ )
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
+ DEFAULT_PATH = os.path.join(os.path.sep, "etc", "fstab")
def __init__(self, path=None):
if path:
self._path = path
else:
self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
+ super(Fstab, self).__init__(self._path, "rb+")
def _hydrate_entry(self, line):
# NOTE: use split with no arguments to split on any
# whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
+ return Fstab.Entry(
+ *filter(lambda x: x not in ("", None), line.strip("\n").split())
+ )
@property
def entries(self):
self.seek(0)
for line in self.readlines():
- line = line.decode('us-ascii')
+ line = line.decode("us-ascii")
try:
if line.strip() and not line.strip().startswith("#"):
yield self._hydrate_entry(line)
@@ -88,17 +90,17 @@ class Fstab(io.FileIO):
return None
def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
+ if self.get_entry_by_attr("device", entry.device):
return False
- self.write((str(entry) + '\n').encode('us-ascii'))
+ self.write((str(entry) + "\n").encode("us-ascii"))
self.truncate()
return entry
def remove_entry(self, entry):
self.seek(0)
- lines = [l.decode('us-ascii') for l in self.readlines()]
+ lines = [l.decode("us-ascii") for l in self.readlines()]
found = False
for index, line in enumerate(lines):
@@ -113,20 +115,20 @@ class Fstab(io.FileIO):
lines.remove(line)
self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
+ self.write("".join(lines).encode("us-ascii"))
self.truncate()
return True
@classmethod
def remove_by_mountpoint(cls, mountpoint, path=None):
fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
+ entry = fstab.get_entry_by_attr("mountpoint", mountpoint)
if entry:
return fstab.remove_entry(entry)
return False
@classmethod
def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
+ return cls(path=path).add_entry(
+ Fstab.Entry(device, mountpoint, filesystem, options=options)
+ )
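For context, the two classmethods above in use; the device and mountpoint are illustrative:

from charmhelpers.core.fstab import Fstab

# path defaults to /etc/fstab
Fstab.add("/dev/vdb1", "/srv/juju/volumes/demo-0", "ext4", options="defaults")
Fstab.remove_by_mountpoint("/srv/juju/volumes/demo-0")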
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index d7c37c1..c48ce30 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -38,6 +38,7 @@ from subprocess import CalledProcessError
from charmhelpers import deprecate
import six
+
if not six.PY3:
from UserDict import UserDict
else:
@@ -54,16 +55,18 @@ MARKER = object()
SH_MAX_ARG = 131071
-RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
- 'This may not be compatible with software you are '
- 'running in your shell.')
+RANGE_WARNING = (
+ "Passing NO_PROXY string that includes a cidr. "
+ "This may not be compatible with software you are "
+ "running in your shell."
+)
class WORKLOAD_STATES(Enum):
- ACTIVE = 'active'
- BLOCKED = 'blocked'
- MAINTENANCE = 'maintenance'
- WAITING = 'waiting'
+ ACTIVE = "active"
+ BLOCKED = "blocked"
+ MAINTENANCE = "maintenance"
+ WAITING = "waiting"
cache = {}
@@ -82,6 +85,7 @@ def cached(func):
will cache the result of unit_get + 'test' for future calls.
"""
+
@wraps(func)
def wrapper(*args, **kwargs):
global cache
@@ -93,6 +97,7 @@ def cached(func):
res = func(*args, **kwargs)
cache[key] = res
return res
+
wrapper._wrapped = func
return wrapper
@@ -110,9 +115,9 @@ def flush(key):
def log(message, level=None):
"""Write a message to the juju log"""
- command = ['juju-log']
+ command = ["juju-log"]
if level:
- command += ['-l', level]
+ command += ["-l", level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message[:SH_MAX_ARG]]
@@ -132,7 +137,7 @@ def log(message, level=None):
def function_log(message):
"""Write a function progress message"""
- command = ['function-log']
+ command = ["function-log"]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message[:SH_MAX_ARG]]
@@ -190,57 +195,59 @@ class Serializable(UserDict):
def execution_environment():
"""A convenient bundling of the current execution context"""
context = {}
- context['conf'] = config()
+ context["conf"] = config()
if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
+ context["reltype"] = relation_type()
+ context["relid"] = relation_id()
+ context["rel"] = relation_get()
+ context["unit"] = local_unit()
+ context["rels"] = relations()
+ context["env"] = os.environ
return context
def in_relation_hook():
"""Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
+ return "JUJU_RELATION" in os.environ
def relation_type():
"""The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
+ return os.environ.get("JUJU_RELATION", None)
@cached
def relation_id(relation_name=None, service_or_unit=None):
"""The relation ID for the current or a specified relation"""
if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
+ return os.environ.get("JUJU_RELATION_ID", None)
elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
+ service_name = service_or_unit.split("/")[0]
for relid in relation_ids(relation_name):
remote_service = remote_service_name(relid)
if remote_service == service_name:
return relid
else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
+ raise ValueError(
+ "Must specify neither or both of relation_name and service_or_unit"
+ )
def local_unit():
"""Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
+ return os.environ["JUJU_UNIT_NAME"]
def remote_unit():
"""The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
+ return os.environ.get("JUJU_REMOTE_UNIT", None)
def application_name():
"""
The name of the deployed application this unit belongs to.
"""
- return local_unit().split('/')[0]
+ return local_unit().split("/")[0]
def service_name():
@@ -255,23 +262,23 @@ def model_name():
"""
Name of the model that this unit is deployed in.
"""
- return os.environ['JUJU_MODEL_NAME']
+ return os.environ["JUJU_MODEL_NAME"]
def model_uuid():
"""
UUID of the model that this unit is deployed in.
"""
- return os.environ['JUJU_MODEL_UUID']
+ return os.environ["JUJU_MODEL_UUID"]
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
- principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
+ principal_unit = os.environ.get("JUJU_PRINCIPAL_UNIT", None)
# If it's empty, then this unit is the principal
- if principal_unit == '':
- return os.environ['JUJU_UNIT_NAME']
+ if principal_unit == "":
+ return os.environ["JUJU_UNIT_NAME"]
elif principal_unit is not None:
return principal_unit
# For Juju 2.1 and below, let's try work out the principle unit by
@@ -282,7 +289,7 @@ def principal_unit():
md = _metadata_unit(unit)
if not md:
continue
- subordinate = md.pop('subordinate', None)
+ subordinate = md.pop("subordinate", None)
if not subordinate:
return unit
return None
@@ -296,12 +303,12 @@ def remote_service_name(relid=None):
else:
units = related_units(relid)
unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
+ return unit.split("/")[0] if unit else None
def hook_name():
"""The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
+ return os.environ.get("JUJU_HOOK_NAME", os.path.basename(sys.argv[0]))
class Config(dict):
@@ -342,7 +349,8 @@ class Config(dict):
'myval'
"""
- CONFIG_FILE_NAME = '.juju-persistent-config'
+
+ CONFIG_FILE_NAME = ".juju-persistent-config"
def __init__(self, *args, **kw):
super(Config, self).__init__(*args, **kw)
@@ -372,8 +380,10 @@ class Config(dict):
try:
self._prev_dict = json.load(f)
except ValueError as e:
- log('Unable to parse previous config data - {}'.format(str(e)),
- level=ERROR)
+ log(
+ "Unable to parse previous config data - {}".format(str(e)),
+ level=ERROR,
+ )
for k, v in copy.deepcopy(self._prev_dict).items():
if k not in self:
self[k] = v
@@ -408,7 +418,7 @@ class Config(dict):
instance.
"""
- with open(self.path, 'w') as f:
+ with open(self.path, "w") as f:
os.fchmod(f.fileno(), 0o600)
json.dump(self, f)
@@ -432,7 +442,7 @@ def config(scope=None):
:rtype: Any
"""
global _cache_config
- config_cmd_line = ['config-get', '--all', '--format=json']
+ config_cmd_line = ["config-get", "--all", "--format=json"]
try:
# JSON Decode Exception for Python3.5+
exc_json = json.decoder.JSONDecodeError
@@ -442,30 +452,33 @@ def config(scope=None):
try:
if _cache_config is None:
config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
+ subprocess.check_output(config_cmd_line).decode("UTF-8")
+ )
_cache_config = Config(config_data)
if scope is not None:
return _cache_config.get(scope)
return _cache_config
except (exc_json, UnicodeDecodeError) as e:
- log('Unable to parse output from config-get: config_cmd_line="{}" '
- 'message="{}"'
- .format(config_cmd_line, str(e)), level=ERROR)
+ log(
+ 'Unable to parse output from config-get: config_cmd_line="{}" '
+ 'message="{}"'.format(config_cmd_line, str(e)),
+ level=ERROR,
+ )
return None
@cached
def relation_get(attribute=None, unit=None, rid=None):
"""Get relation information"""
- _args = ['relation-get', '--format=json']
+ _args = ["relation-get", "--format=json"]
if rid:
- _args.append('-r')
+ _args.append("-r")
_args.append(rid)
- _args.append(attribute or '-')
+ _args.append(attribute or "-")
if unit:
_args.append(unit)
try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ return json.loads(subprocess.check_output(_args).decode("UTF-8"))
except ValueError:
return None
except CalledProcessError as e:
@@ -477,11 +490,12 @@ def relation_get(attribute=None, unit=None, rid=None):
def relation_set(relation_id=None, relation_settings=None, **kwargs):
"""Set relation information for the current unit"""
relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
+ relation_cmd_line = ["relation-set"]
accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
+ relation_cmd_line + ["--help"], universal_newlines=True
+ )
if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
+ relation_cmd_line.extend(("-r", relation_id))
settings = relation_settings.copy()
settings.update(kwargs)
for key, value in settings.items():
@@ -496,40 +510,36 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
# stdin, but that feature is broken in 1.23.2: Bug #1454678.
with tempfile.NamedTemporaryFile(delete=False) as settings_file:
settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
+ subprocess.check_call(relation_cmd_line + ["--file", settings_file.name])
os.remove(settings_file.name)
else:
for key, value in settings.items():
if value is None:
- relation_cmd_line.append('{}='.format(key))
+ relation_cmd_line.append("{}=".format(key))
else:
- relation_cmd_line.append('{}={}'.format(key, value))
+ relation_cmd_line.append("{}={}".format(key, value))
subprocess.check_call(relation_cmd_line)
# Flush cache of any relation-gets for local unit
flush(local_unit())
def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
+ """ Clears any relation data already set on relation r_id """
+ settings = relation_get(rid=r_id, unit=local_unit())
for setting in settings:
- if setting not in ['public-address', 'private-address']:
+ if setting not in ["public-address", "private-address"]:
settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
+ relation_set(relation_id=r_id, **settings)
@cached
def relation_ids(reltype=None):
"""A list of relation_ids"""
reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
+ relid_cmd_line = ["relation-ids", "--format=json"]
if reltype is not None:
relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
+ return json.loads(subprocess.check_output(relid_cmd_line).decode("UTF-8")) or []
return []
@@ -537,11 +547,10 @@ def relation_ids(reltype=None):
def related_units(relid=None):
"""A list of related units"""
relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
+ units_cmd_line = ["relation-list", "--format=json"]
if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+ units_cmd_line.extend(("-r", relid))
+ return json.loads(subprocess.check_output(units_cmd_line).decode("UTF-8")) or []
def expected_peer_units():
@@ -567,8 +576,7 @@ def expected_peer_units():
# goal-state first appeared in 2.4.0.
raise NotImplementedError("goal-state")
_goal_state = goal_state()
- return (key for key in _goal_state['units']
- if '/' in key and key != local_unit())
+ return (key for key in _goal_state["units"] if "/" in key and key != local_unit())
def expected_related_units(reltype=None):
@@ -602,7 +610,7 @@ def expected_related_units(reltype=None):
raise NotImplementedError("goal-state relation unit count")
reltype = reltype or relation_type()
_goal_state = goal_state()
- return (key for key in _goal_state['relations'][reltype] if '/' in key)
+ return (key for key in _goal_state["relations"][reltype] if "/" in key)
@cached
@@ -611,9 +619,9 @@ def relation_for_unit(unit=None, rid=None):
unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid)
for key in relation:
- if key.endswith('-list'):
+ if key.endswith("-list"):
relation[key] = relation[key].split()
- relation['__unit__'] = unit
+ relation["__unit__"] = unit
return relation
@@ -624,7 +632,7 @@ def relations_for_id(relid=None):
relid = relid or relation_ids()
for unit in related_units(relid):
unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
+ unit_data["__relid__"] = relid
relation_data.append(unit_data)
return relation_data
@@ -636,7 +644,7 @@ def relations_of_type(reltype=None):
reltype = reltype or relation_type()
for relid in relation_ids(reltype):
for relation in relations_for_id(relid):
- relation['__relid__'] = relid
+ relation["__relid__"] = relid
relation_data.append(relation)
return relation_data
@@ -644,7 +652,7 @@ def relations_of_type(reltype=None):
@cached
def metadata():
"""Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+ with open(os.path.join(charm_dir(), "metadata.yaml")) as md:
return yaml.safe_load(md)
@@ -658,8 +666,8 @@ def _metadata_unit(unit):
"""
basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
- unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
- joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
+ unitdir = "unit-{}".format(unit.replace(os.sep, "-"))
+ joineddir = os.path.join(basedir, unitdir, "charm", "metadata.yaml")
if not os.path.exists(joineddir):
return None
with open(joineddir) as md:
@@ -671,7 +679,7 @@ def relation_types():
"""Get a list of relation types supported by this charm"""
rel_types = []
md = metadata()
- for key in ('provides', 'requires', 'peers'):
+ for key in ("provides", "requires", "peers"):
section = md.get(key)
if section:
rel_types.extend(section.keys())
@@ -680,9 +688,9 @@ def relation_types():
@cached
def peer_relation_id():
- '''Get the peers relation id if a peers relation has been joined, else None.'''
+ """Get the peers relation id if a peers relation has been joined, else None."""
md = metadata()
- section = md.get('peers')
+ section = md.get("peers")
if section:
for key in section:
relids = relation_ids(key)
@@ -710,8 +718,8 @@ def relation_to_role_and_interface(relation_name):
:returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
"""
_metadata = metadata()
- for role in ('provides', 'requires', 'peers'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
+ for role in ("provides", "requires", "peers"):
+ interface = _metadata.get(role, {}).get(relation_name, {}).get("interface")
if interface:
return role, interface
return None, None
@@ -729,7 +737,7 @@ def role_and_interface_to_relations(role, interface_name):
_metadata = metadata()
results = []
for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
+ if relation["interface"] == interface_name:
results.append(relation_name)
return results
@@ -743,7 +751,7 @@ def interface_to_relations(interface_name):
:returns: A list of relation names.
"""
results = []
- for role in ('provides', 'requires', 'peers'):
+ for role in ("provides", "requires", "peers"):
results.extend(role_and_interface_to_relations(role, interface_name))
return results
@@ -751,7 +759,7 @@ def interface_to_relations(interface_name):
@cached
def charm_name():
"""Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
+ return metadata().get("name")
@cached
@@ -771,20 +779,19 @@ def relations():
@cached
-def is_relation_made(relation, keys='private-address'):
- '''
+def is_relation_made(relation, keys="private-address"):
+ """
Determine whether a relation is established by checking for
presence of key(s). If a list of keys is provided, they
must all be present for the relation to be identified as made
- '''
+ """
if isinstance(keys, str):
keys = [keys]
for r_id in relation_ids(relation):
for unit in related_units(r_id):
context = {}
for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
+ context[k] = relation_get(k, rid=r_id, unit=unit)
if None not in context.values():
return True
return False
@@ -797,7 +804,7 @@ def _port_op(op_name, port, protocol="TCP"):
if icmp:
_args.append(protocol)
else:
- _args.append('{}/{}'.format(port, protocol))
+ _args.append("{}/{}".format(port, protocol))
try:
subprocess.check_call(_args)
except subprocess.CalledProcessError:
@@ -809,25 +816,25 @@ def _port_op(op_name, port, protocol="TCP"):
def open_port(port, protocol="TCP"):
"""Open a service network port"""
- _port_op('open-port', port, protocol)
+ _port_op("open-port", port, protocol)
def close_port(port, protocol="TCP"):
"""Close a service network port"""
- _port_op('close-port', port, protocol)
+ _port_op("close-port", port, protocol)
def open_ports(start, end, protocol="TCP"):
"""Opens a range of service network ports"""
- _args = ['open-port']
- _args.append('{}-{}/{}'.format(start, end, protocol))
+ _args = ["open-port"]
+ _args.append("{}-{}/{}".format(start, end, protocol))
subprocess.check_call(_args)
def close_ports(start, end, protocol="TCP"):
"""Close a range of service network ports"""
- _args = ['close-port']
- _args.append('{}-{}/{}'.format(start, end, protocol))
+ _args = ["close-port"]
+ _args.append("{}-{}/{}".format(start, end, protocol))
subprocess.check_call(_args)
@@ -838,40 +845,40 @@ def opened_ports():
:returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
"""
- _args = ['opened-ports', '--format=json']
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ _args = ["opened-ports", "--format=json"]
+ return json.loads(subprocess.check_output(_args).decode("UTF-8"))
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
+ _args = ["unit-get", "--format=json", attribute]
try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ return json.loads(subprocess.check_output(_args).decode("UTF-8"))
except ValueError:
return None
def unit_public_ip():
"""Get this unit's public IP address"""
- return unit_get('public-address')
+ return unit_get("public-address")
def unit_private_ip():
"""Get this unit's private IP address"""
- return unit_get('private-address')
+ return unit_get("private-address")
@cached
def storage_get(attribute=None, storage_id=None):
"""Get storage attributes"""
- _args = ['storage-get', '--format=json']
+ _args = ["storage-get", "--format=json"]
if storage_id:
- _args.extend(('-s', storage_id))
+ _args.extend(("-s", storage_id))
if attribute:
_args.append(attribute)
try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ return json.loads(subprocess.check_output(_args).decode("UTF-8"))
except ValueError:
return None
@@ -879,15 +886,16 @@ def storage_get(attribute=None, storage_id=None):
@cached
def storage_list(storage_name=None):
"""List the storage IDs for the unit"""
- _args = ['storage-list', '--format=json']
+ _args = ["storage-list", "--format=json"]
if storage_name:
_args.append(storage_name)
try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ return json.loads(subprocess.check_output(_args).decode("UTF-8"))
except ValueError:
return None
except OSError as e:
import errno
+
if e.errno == errno.ENOENT:
# storage-list does not exist
return []
@@ -896,6 +904,7 @@ def storage_list(storage_name=None):
class UnregisteredHookError(Exception):
"""Raised when an undefined hook is called"""
+
pass
@@ -951,15 +960,16 @@ class Hooks(object):
def hook(self, *hook_names):
"""Decorator, registering them as hooks"""
+
def wrapper(decorated):
for hook_name in hook_names:
self.register(hook_name, decorated)
else:
self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
+ if "_" in decorated.__name__:
+ self.register(decorated.__name__.replace("_", "-"), decorated)
return decorated
+
return wrapper
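A minimal sketch of the decorator in use; note the for/else always falls through, so each function is also registered under its own name (and a dashed variant when it contains underscores). Hook names here are illustrative:

    import sys

    from charmhelpers.core.hookenv import Hooks

    hooks = Hooks()

    @hooks.hook("config-changed", "upgrade-charm")
    def reconfigure():
        pass

    @hooks.hook()  # registered as "db_relation_joined" and "db-relation-joined"
    def db_relation_joined():
        pass

    if __name__ == "__main__":
        hooks.execute(sys.argv)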
@@ -969,10 +979,10 @@ class NoNetworkBinding(Exception):
def charm_dir():
"""Return the root directory of the current charm"""
- d = os.environ.get('JUJU_CHARM_DIR')
+ d = os.environ.get("JUJU_CHARM_DIR")
if d is not None:
return d
- return os.environ.get('CHARM_DIR')
+ return os.environ.get("CHARM_DIR")
def cmd_exists(cmd):
@@ -992,26 +1002,26 @@ def action_get(key=None):
Gets the value of an action parameter, or all key/value param pairs.
"""
- cmd = ['action-get']
+ cmd = ["action-get"]
if key is not None:
cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ cmd.append("--format=json")
+ action_data = json.loads(subprocess.check_output(cmd).decode("UTF-8"))
return action_data
@cached
def function_get(key=None):
"""Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['function-get']
+ cmd = ["function-get"]
# Fallback for older charms.
- if not cmd_exists('function-get'):
- cmd = ['action-get']
+ if not cmd_exists("function-get"):
+ cmd = ["action-get"]
if key is not None:
cmd.append(key)
- cmd.append('--format=json')
- function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ cmd.append("--format=json")
+ function_data = json.loads(subprocess.check_output(cmd).decode("UTF-8"))
return function_data
@@ -1023,21 +1033,21 @@ def action_set(values):
Sets the values to be returned after the action finishes.
"""
- cmd = ['action-set']
+ cmd = ["action-set"]
for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
+ cmd.append("{}={}".format(k, v))
subprocess.check_call(cmd)
def function_set(values):
"""Sets the values to be returned after the function finishes"""
- cmd = ['function-set']
+ cmd = ["function-set"]
# Fallback for older charms.
- if not cmd_exists('function-get'):
- cmd = ['action-set']
+ if not cmd_exists("function-get"):
+ cmd = ["action-set"]
for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
+ cmd.append("{}={}".format(k, v))
subprocess.check_call(cmd)
@@ -1051,17 +1061,17 @@ def action_fail(message):
The results set by action_set are preserved.
"""
- subprocess.check_call(['action-fail', message])
+ subprocess.check_call(["action-fail", message])
def function_fail(message):
"""Sets the function status to failed and sets the error message.
The results set by function_set are preserved."""
- cmd = ['function-fail']
+ cmd = ["function-fail"]
# Fallback for older charms.
- if not cmd_exists('function-fail'):
- cmd = ['action-fail']
+ if not cmd_exists("function-fail"):
+ cmd = ["action-fail"]
cmd.append(message)
subprocess.check_call(cmd)
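Tying the action helpers together, a hypothetical action body might look like this (run_backup and the parameter names are assumptions, not part of this library):

    from charmhelpers.core.hookenv import action_fail, action_get, action_set

    def backup_action():
        target = action_get("target")  # one parameter from the action schema
        try:
            run_backup(target)         # hypothetical helper
        except Exception as exc:
            action_fail("backup failed: {}".format(exc))
        else:
            action_set({"outcome": "success", "target": target})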
@@ -1069,32 +1079,32 @@ def function_fail(message):
def action_name():
"""Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
+ return os.environ.get("JUJU_ACTION_NAME")
def function_name():
"""Get the name of the currently executing function."""
- return os.environ.get('JUJU_FUNCTION_NAME') or action_name()
+ return os.environ.get("JUJU_FUNCTION_NAME") or action_name()
def action_uuid():
"""Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
+ return os.environ.get("JUJU_ACTION_UUID")
def function_id():
"""Get the ID of the currently executing function."""
- return os.environ.get('JUJU_FUNCTION_ID') or action_uuid()
+ return os.environ.get("JUJU_FUNCTION_ID") or action_uuid()
def action_tag():
"""Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
+ return os.environ.get("JUJU_ACTION_TAG")
def function_tag():
"""Get the tag for the currently executing function."""
- return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
+ return os.environ.get("JUJU_FUNCTION_TAG") or action_tag()
def status_set(workload_state, message, application=False):
@@ -1108,7 +1118,7 @@ def status_set(workload_state, message, application=False):
message -- status update message
application -- Whether this is an application state set
"""
- bad_state_msg = '{!r} is not a valid workload state'
+ bad_state_msg = "{!r} is not a valid workload state"
if isinstance(workload_state, str):
try:
@@ -1120,9 +1130,9 @@ def status_set(workload_state, message, application=False):
if workload_state not in WORKLOAD_STATES:
raise ValueError(bad_state_msg.format(workload_state))
- cmd = ['status-set']
+ cmd = ["status-set"]
if application:
- cmd.append('--application')
+ cmd.append("--application")
cmd.extend([workload_state.value, message])
try:
ret = subprocess.call(cmd)
@@ -1131,9 +1141,8 @@ def status_set(workload_state, message, application=False):
except OSError as e:
if e.errno != errno.ENOENT:
raise
- log_message = 'status-set failed: {} {}'.format(workload_state.value,
- message)
- log(log_message, level='INFO')
+ log_message = "status-set failed: {} {}".format(workload_state.value, message)
+ log(log_message, level="INFO")
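Illustrative hook usage (the validation predicate is hypothetical):

    from charmhelpers.core.hookenv import status_set

    if config_is_valid():  # hypothetical check
        status_set("active", "ready")
    else:
        status_set("blocked", "invalid configuration; see juju config")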
def status_get():
@@ -1143,12 +1152,12 @@ def status_get():
return 'unknown', ""
"""
- cmd = ['status-get', "--format=json", "--include-data"]
+ cmd = ["status-get", "--format=json", "--include-data"]
try:
raw_status = subprocess.check_output(cmd)
except OSError as e:
if e.errno == errno.ENOENT:
- return ('unknown', "")
+ return ("unknown", "")
else:
raise
else:
@@ -1176,7 +1185,7 @@ def application_version_set(version):
for instance postgres version 9.5. It could also be a build number or
version control revision identifier, for instance git sha 6fb7ba68. """
- cmd = ['application-version-set']
+ cmd = ["application-version-set"]
cmd.append(version)
try:
subprocess.check_call(cmd)
@@ -1188,8 +1197,8 @@ def application_version_set(version):
@cached
def goal_state():
"""Juju goal state values"""
- cmd = ['goal-state', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ cmd = ["goal-state", "--format=json"]
+ return json.loads(subprocess.check_output(cmd).decode("UTF-8"))
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
@@ -1198,15 +1207,15 @@ def is_leader():
Uses juju to determine whether the current unit is the leader of its peers
"""
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ cmd = ["is-leader", "--format=json"]
+ return json.loads(subprocess.check_output(cmd).decode("UTF-8"))
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_get(attribute=None):
"""Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ cmd = ["leader-get", "--format=json"] + [attribute or "-"]
+ return json.loads(subprocess.check_output(cmd).decode("UTF-8"))
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
@@ -1214,14 +1223,14 @@ def leader_set(settings=None, **kwargs):
"""Juju leader set value(s)"""
# Don't log secrets.
# log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
+ cmd = ["leader-set"]
settings = settings or {}
settings.update(kwargs)
for k, v in settings.items():
if v is None:
- cmd.append('{}='.format(k))
+ cmd.append("{}=".format(k))
else:
- cmd.append('{}={}'.format(k, v))
+ cmd.append("{}={}".format(k, v))
subprocess.check_call(cmd)
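Sketch of leader-only writes, with every peer able to read the shared value afterwards (the key name is illustrative):

    from charmhelpers.core.hookenv import is_leader, leader_get, leader_set
    from charmhelpers.core.host import pwgen

    if is_leader():
        leader_set({"db-password": pwgen(32)})
    password = leader_get("db-password")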
@@ -1229,7 +1238,7 @@ def leader_set(settings=None, **kwargs):
def payload_register(ptype, klass, pid):
""" is used while a hook is running to let Juju know that a
payload has been started."""
- cmd = ['payload-register']
+ cmd = ["payload-register"]
for x in [ptype, klass, pid]:
cmd.append(x)
subprocess.check_call(cmd)
@@ -1241,7 +1250,7 @@ def payload_unregister(klass, pid):
that a payload has been manually stopped. The <class> and <id> provided
must match a payload that has been previously registered with juju using
payload-register."""
- cmd = ['payload-unregister']
+ cmd = ["payload-unregister"]
for x in [klass, pid]:
cmd.append(x)
subprocess.check_call(cmd)
@@ -1253,7 +1262,7 @@ def payload_status_set(klass, pid, status):
The <class> and <id> provided must match a payload that has been previously
registered with juju using payload-register. The <status> must be one of the
following: starting, started, stopping, stopped"""
- cmd = ['payload-status-set']
+ cmd = ["payload-status-set"]
for x in [klass, pid, status]:
cmd.append(x)
subprocess.check_call(cmd)
@@ -1270,9 +1279,9 @@ def resource_get(name):
if not name:
return False
- cmd = ['resource-get', name]
+ cmd = ["resource-get", name]
try:
- return subprocess.check_output(cmd).decode('UTF-8')
+ return subprocess.check_output(cmd).decode("UTF-8")
except subprocess.CalledProcessError:
return False
@@ -1281,9 +1290,8 @@ def resource_get(name):
def juju_version():
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
# Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
+ jujud = glob.glob("/var/lib/juju/tools/machine-*/jujud")[0]
+ return subprocess.check_output([jujud, "version"], universal_newlines=True).strip()
def has_juju_version(minimum_version):
@@ -1296,7 +1304,7 @@ _atstart = []
def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
+ """Schedule a callback to run before the main hook.
Callbacks are run in the order they were added.
@@ -1313,20 +1321,20 @@ def atstart(callback, *args, **kwargs):
your object is instantiated or module imported.
This is not at all useful after your hook framework has been launched.
- '''
+ """
global _atstart
_atstart.append((callback, args, kwargs))
def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
+ """Schedule a callback to run on successful hook completion.
- Callbacks are run in the reverse order that they were added.'''
+ Callbacks are run in the reverse order that they were added."""
_atexit.append((callback, args, kwargs))
def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
+ """Hook frameworks must invoke this before running the main hook body."""
global _atstart
for callback, args, kwargs in _atstart:
callback(*args, **kwargs)
@@ -1334,8 +1342,8 @@ def _run_atstart():
def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
+ """Hook frameworks must invoke this after the main hook body has
+ successfully completed. Do not invoke it if the hook fails."""
global _atexit
for callback, args, kwargs in reversed(_atexit):
callback(*args, **kwargs)
@@ -1344,7 +1352,7 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
- '''
+ """
Deprecated since Juju 2.3; use network_get()
Retrieve the primary network address for a named binding
@@ -1352,16 +1360,17 @@ def network_get_primary_address(binding):
:param binding: string. The name of a relation or extra-binding
:return: string. The primary IP address for the named binding
:raise: NotImplementedError if run on Juju < 2.0
- '''
- cmd = ['network-get', '--primary-address', binding]
+ """
+ cmd = ["network-get", "--primary-address", binding]
try:
- response = subprocess.check_output(
- cmd,
- stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ response = (
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ .decode("UTF-8")
+ .strip()
+ )
except CalledProcessError as e:
- if 'no network config found for binding' in e.output.decode('UTF-8'):
- raise NoNetworkBinding("No network binding for {}"
- .format(binding))
+ if "no network config found for binding" in e.output.decode("UTF-8"):
+ raise NoNetworkBinding("No network binding for {}".format(binding))
else:
raise
return response
@@ -1376,18 +1385,20 @@ def network_get(endpoint, relation_id=None):
:return: dict. The loaded YAML output of the network-get query.
:raise: NotImplementedError if request not supported by the Juju version.
"""
- if not has_juju_version('2.2'):
- raise NotImplementedError(juju_version()) # earlier versions require --primary-address
- if relation_id and not has_juju_version('2.3'):
+ if not has_juju_version("2.2"):
+ raise NotImplementedError(
+ juju_version()
+ ) # earlier versions require --primary-address
+ if relation_id and not has_juju_version("2.3"):
raise NotImplementedError # 2.3 added the -r option
- cmd = ['network-get', endpoint, '--format', 'yaml']
+ cmd = ["network-get", endpoint, "--format", "yaml"]
if relation_id:
- cmd.append('-r')
+ cmd.append("-r")
cmd.append(relation_id)
- response = subprocess.check_output(
- cmd,
- stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ response = (
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("UTF-8").strip()
+ )
return yaml.safe_load(response)
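A sketch of degrading gracefully across Juju versions; the binding name and the ingress-addresses key layout are assumptions about the network-get output:

    from charmhelpers.core.hookenv import (
        network_get,
        network_get_primary_address,
    )

    try:
        info = network_get("website")
        addr = info.get("ingress-addresses", [None])[0]
    except NotImplementedError:
        # Juju < 2.2: fall back to the deprecated primary-address query
        addr = network_get_primary_address("website")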
@@ -1396,10 +1407,10 @@ def add_metric(*args, **kwargs):
metric names containing dashes, these may be expressed as one or more
'key=value' positional arguments. May only be called from the collect-metrics
hook."""
- _args = ['add-metric']
+ _args = ["add-metric"]
_kvpairs = []
_kvpairs.extend(args)
- _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
+ _kvpairs.extend(["{}={}".format(k, v) for k, v in kwargs.items()])
_args.extend(sorted(_kvpairs))
try:
subprocess.check_call(_args)
@@ -1407,19 +1418,19 @@ def add_metric(*args, **kwargs):
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
- log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
- log(log_message, level='INFO')
+ log_message = "add-metric failed: {}".format(" ".join(_kvpairs))
+ log(log_message, level="INFO")
def meter_status():
"""Get the meter status, if running in the meter-status-changed hook."""
- return os.environ.get('JUJU_METER_STATUS')
+ return os.environ.get("JUJU_METER_STATUS")
def meter_info():
"""Get the meter status information, if running in the meter-status-changed
hook."""
- return os.environ.get('JUJU_METER_INFO')
+ return os.environ.get("JUJU_METER_INFO")
def iter_units_for_relation_name(relation_name):
@@ -1435,7 +1446,7 @@ def iter_units_for_relation_name(relation_name):
:param relation_name: string relation name
:yield: Named Tuple with rid and unit field names
"""
- RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+ RelatedUnit = namedtuple("RelatedUnit", "rid, unit")
for rid in relation_ids(relation_name):
for unit in related_units(rid):
yield RelatedUnit(rid, unit)
@@ -1471,8 +1482,7 @@ def ingress_address(rid=None, unit=None):
:return: string IP address
"""
settings = relation_get(rid=rid, unit=unit)
- return (settings.get('ingress-address') or
- settings.get('private-address'))
+ return settings.get("ingress-address") or settings.get("private-address")
def egress_subnets(rid=None, unit=None):
@@ -1495,20 +1505,21 @@ def egress_subnets(rid=None, unit=None):
:side effect: calls relation_get
:return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
"""
+
def _to_range(addr):
- if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
- addr += '/32'
- elif ':' in addr and '/' not in addr: # IPv6
- addr += '/128'
+ if re.search(r"^(?:\d{1,3}\.){3}\d{1,3}$", addr) is not None:
+ addr += "/32"
+ elif ":" in addr and "/" not in addr: # IPv6
+ addr += "/128"
return addr
settings = relation_get(rid=rid, unit=unit)
- if 'egress-subnets' in settings:
- return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
- if 'ingress-address' in settings:
- return [_to_range(settings['ingress-address'])]
- if 'private-address' in settings:
- return [_to_range(settings['private-address'])]
+ if "egress-subnets" in settings:
+ return [n.strip() for n in settings["egress-subnets"].split(",") if n.strip()]
+ if "ingress-address" in settings:
+ return [_to_range(settings["ingress-address"])]
+ if "private-address" in settings:
+ return [_to_range(settings["private-address"])]
return [] # Should never happen
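Illustratively, _to_range leaves CIDRs alone and widens bare addresses ("10.0.0.1" becomes "10.0.0.1/32", "2001::F00F" becomes "2001::F00F/128"), so callers can treat every returned entry as a subnet. A sketch (relation id, unit, and the firewall helper are hypothetical):

    from charmhelpers.core.hookenv import egress_subnets

    for cidr in egress_subnets(rid="db:1", unit="mysql/0"):
        allow_from(cidr)  # hypothetical firewall helper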
@@ -1534,12 +1545,12 @@ def unit_doomed(unit=None):
if unit is None:
unit = local_unit()
gs = goal_state()
- units = gs.get('units', {})
+ units = gs.get("units", {})
if unit not in units:
return True
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
- return units[unit]['status'] in ('dying', 'dead')
+ return units[unit]["status"] in ("dying", "dead")
def env_proxy_settings(selected_settings=None):
@@ -1563,16 +1574,15 @@ def env_proxy_settings(selected_settings=None):
:rtype: Option(None, dict[str, str])
"""
SUPPORTED_SETTINGS = {
- 'http': 'HTTP_PROXY',
- 'https': 'HTTPS_PROXY',
- 'no_proxy': 'NO_PROXY',
- 'ftp': 'FTP_PROXY'
+ "http": "HTTP_PROXY",
+ "https": "HTTPS_PROXY",
+ "no_proxy": "NO_PROXY",
+ "ftp": "FTP_PROXY",
}
if selected_settings is None:
selected_settings = SUPPORTED_SETTINGS
- selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
- if k in selected_settings]
+ selected_vars = [v for k, v in SUPPORTED_SETTINGS.items() if k in selected_settings]
proxy_settings = {}
for var in selected_vars:
var_val = os.getenv(var)
@@ -1581,12 +1591,12 @@ def env_proxy_settings(selected_settings=None):
proxy_settings[var.lower()] = var_val
# Now handle juju-prefixed environment variables. The legacy vs new
# environment variable usage is mutually exclusive
- charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+ charm_var_val = os.getenv("JUJU_CHARM_{}".format(var))
if charm_var_val:
proxy_settings[var] = charm_var_val
proxy_settings[var.lower()] = charm_var_val
- if 'no_proxy' in proxy_settings:
- if _contains_range(proxy_settings['no_proxy']):
+ if "no_proxy" in proxy_settings:
+ if _contains_range(proxy_settings["no_proxy"]):
log(RANGE_WARNING, level=WARNING)
return proxy_settings if proxy_settings else None
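Sketch: merging the detected proxy settings into a subprocess environment (the URL is illustrative):

    import os
    import subprocess

    from charmhelpers.core.hookenv import env_proxy_settings

    env = dict(os.environ)
    env.update(env_proxy_settings(["http", "https"]) or {})
    subprocess.check_call(["curl", "-fsSL", "https://example.com/"], env=env)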
@@ -1603,9 +1613,11 @@ def _contains_range(addresses):
"""
return (
# Test for cidr (e.g. 10.20.20.0/24)
- "/" in addresses or
+ "/" in addresses
+ or
# Test for wildcard domains (*.foo.com or .foo.com)
- "*" in addresses or
- addresses.startswith(".") or
- ",." in addresses or
- " ." in addresses)
+ "*" in addresses
+ or addresses.startswith(".")
+ or ",." in addresses
+ or " ." in addresses
+ )
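A few illustrative inputs for the heuristic above:

    assert _contains_range("10.20.20.0/24")              # CIDR
    assert _contains_range("*.example.com")              # wildcard domain
    assert not _contains_range("proxy1.lan,proxy2.lan")  # plain host list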
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index b33ac90..310efc1 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -47,7 +47,7 @@ if __platform__ == "ubuntu":
cmp_pkgrevno,
CompareHostReleases,
get_distrib_codename,
- arch
+ arch,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import ( # NOQA:F401
@@ -58,7 +58,7 @@ elif __platform__ == "centos":
CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
-UPDATEDB_PATH = '/etc/updatedb.conf'
+UPDATEDB_PATH = "/etc/updatedb.conf"
def service_start(service_name, **kwargs):
@@ -84,7 +84,7 @@ def service_start(service_name, **kwargs):
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
"""
- return service('start', service_name, **kwargs)
+ return service("start", service_name, **kwargs)
def service_stop(service_name, **kwargs):
@@ -110,7 +110,7 @@ def service_stop(service_name, **kwargs):
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
"""
- return service('stop', service_name, **kwargs)
+ return service("stop", service_name, **kwargs)
def service_restart(service_name, **kwargs):
@@ -137,7 +137,7 @@ def service_restart(service_name, **kwargs):
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
- return service('restart', service_name)
+ return service("restart", service_name)
def service_reload(service_name, restart_on_failure=False, **kwargs):
@@ -167,14 +167,15 @@ def service_reload(service_name, restart_on_failure=False, **kwargs):
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
- service_result = service('reload', service_name, **kwargs)
+ service_result = service("reload", service_name, **kwargs)
if not service_result and restart_on_failure:
- service_result = service('restart', service_name, **kwargs)
+ service_result = service("restart", service_name, **kwargs)
return service_result
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
- **kwargs):
+def service_pause(
+ service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs
+):
"""Pause a system service.
Stop it, and prevent it from starting again at boot.
@@ -194,25 +195,25 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
- service('disable', service_name)
- service('mask', service_name)
+ service("disable", service_name)
+ service("mask", service_name)
elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
+ override_path = os.path.join(init_dir, "{}.override".format(service_name))
+ with open(override_path, "w") as fh:
fh.write("manual\n")
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
+ " SysV {2}".format(service_name, upstart_file, sysv_file)
+ )
return stopped
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d", **kwargs):
+def service_resume(
+ service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs
+):
"""Resume a system service.
Reenable starting again at boot. Start the service.
@@ -228,11 +229,10 @@ def service_resume(service_name, init_dir="/etc/init",
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
- service('unmask', service_name)
- service('enable', service_name)
+ service("unmask", service_name)
+ service("enable", service_name)
elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
+ override_path = os.path.join(init_dir, "{}.override".format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
elif os.path.exists(sysv_file):
@@ -240,8 +240,8 @@ def service_resume(service_name, init_dir="/etc/init",
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
+ " SysV {2}".format(service_name, upstart_file, sysv_file)
+ )
started = service_running(service_name, **kwargs)
if not started:
@@ -258,11 +258,11 @@ def service(action, service_name, **kwargs):
the form of key=value.
"""
if init_is_systemd():
- cmd = ['systemctl', action, service_name]
+ cmd = ["systemctl", action, service_name]
else:
- cmd = ['service', service_name, action]
+ cmd = ["service", service_name, action]
for key, value in six.iteritems(kwargs):
- parameter = '%s=%s' % (key, value)
+ parameter = "%s=%s" % (key, value)
cmd.append(parameter)
return subprocess.call(cmd) == 0
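So, illustratively, on a systemd host this runs "systemctl restart nagios3", while on upstart/SysV it runs "service nagios3 restart" (the service name is an example):

    from charmhelpers.core.host import service

    service("restart", "nagios3")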
@@ -282,45 +282,55 @@ def service_running(service_name, **kwargs):
are ignored in systemd services.
"""
if init_is_systemd():
- return service('is-active', service_name)
+ return service("is-active", service_name)
else:
if os.path.exists(_UPSTART_CONF.format(service_name)):
try:
- cmd = ['status', service_name]
+ cmd = ["status", service_name]
for key, value in six.iteritems(kwargs):
- parameter = '%s=%s' % (key, value)
+ parameter = "%s=%s" % (key, value)
cmd.append(parameter)
- output = subprocess.check_output(
- cmd, stderr=subprocess.STDOUT).decode('UTF-8')
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(
+ "UTF-8"
+ )
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running
# 'start/running'
- if ("start/running" in output or
- "is running" in output or
- "up and running" in output):
+ if (
+ "start/running" in output
+ or "is running" in output
+ or "up and running" in output
+ ):
return True
elif os.path.exists(_INIT_D_CONF.format(service_name)):
# Check System V scripts init script return codes
- return service('status', service_name)
+ return service("status", service_name)
return False
-SYSTEMD_SYSTEM = '/run/systemd/system'
+SYSTEMD_SYSTEM = "/run/systemd/system"
def init_is_systemd():
"""Return True if the host system uses systemd, False otherwise."""
- if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
+ if lsb_release()["DISTRIB_CODENAME"] == "trusty":
return False
return os.path.isdir(SYSTEMD_SYSTEM)
-def adduser(username, password=None, shell='/bin/bash',
- system_user=False, primary_group=None,
- secondary_groups=None, uid=None, home_dir=None):
+def adduser(
+ username,
+ password=None,
+ shell="/bin/bash",
+ system_user=False,
+ primary_group=None,
+ secondary_groups=None,
+ uid=None,
+ home_dir=None,
+):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
@@ -338,25 +348,21 @@ def adduser(username, password=None, shell='/bin/bash',
"""
try:
user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
+ log("user {0} already exists!".format(username))
if uid:
user_info = pwd.getpwuid(int(uid))
- log('user with uid {0} already exists!'.format(uid))
+ log("user with uid {0} already exists!".format(uid))
except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
+ log("creating user {0}".format(username))
+ cmd = ["useradd"]
if uid:
- cmd.extend(['--uid', str(uid)])
+ cmd.extend(["--uid", str(uid)])
if home_dir:
- cmd.extend(['--home', str(home_dir)])
+ cmd.extend(["--home", str(home_dir)])
if system_user or password is None:
- cmd.append('--system')
+ cmd.append("--system")
else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
+ cmd.extend(["--create-home", "--shell", shell, "--password", password])
if not primary_group:
try:
grp.getgrnam(username)
@@ -364,9 +370,9 @@ def adduser(username, password=None, shell='/bin/bash',
except KeyError:
pass
if primary_group:
- cmd.extend(['-g', primary_group])
+ cmd.extend(["-g", primary_group])
if secondary_groups:
- cmd.extend(['-G', ','.join(secondary_groups)])
+ cmd.extend(["-G", ",".join(secondary_groups)])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
@@ -426,12 +432,12 @@ def add_group(group_name, system_group=False, gid=None):
"""
try:
group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
+ log("group {0} already exists!".format(group_name))
if gid:
group_info = grp.getgrgid(gid)
- log('group with gid {0} already exists!'.format(gid))
+ log("group with gid {0} already exists!".format(gid))
except KeyError:
- log('creating group {0}'.format(group_name))
+ log("creating group {0}".format(group_name))
add_new_group(group_name, system_group, gid)
group_info = grp.getgrnam(group_name)
return group_info
@@ -439,13 +445,21 @@ def add_group(group_name, system_group=False, gid=None):
def add_user_to_group(username, group):
"""Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
+ cmd = ["gpasswd", "-a", username, group]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd)
-def chage(username, lastday=None, expiredate=None, inactive=None,
- mindays=None, maxdays=None, root=None, warndays=None):
+def chage(
+ username,
+ lastday=None,
+ expiredate=None,
+ inactive=None,
+ mindays=None,
+ maxdays=None,
+ root=None,
+ warndays=None,
+):
"""Change user password expiry information
:param str username: User to update
@@ -467,57 +481,55 @@ def chage(username, lastday=None, expiredate=None, inactive=None,
change is required
:raises subprocess.CalledProcessError: if call to chage fails
"""
- cmd = ['chage']
+ cmd = ["chage"]
if root:
- cmd.extend(['--root', root])
+ cmd.extend(["--root", root])
if lastday:
- cmd.extend(['--lastday', lastday])
+ cmd.extend(["--lastday", lastday])
if expiredate:
- cmd.extend(['--expiredate', expiredate])
+ cmd.extend(["--expiredate", expiredate])
if inactive:
- cmd.extend(['--inactive', inactive])
+ cmd.extend(["--inactive", inactive])
if mindays:
- cmd.extend(['--mindays', mindays])
+ cmd.extend(["--mindays", mindays])
if maxdays:
- cmd.extend(['--maxdays', maxdays])
+ cmd.extend(["--maxdays", maxdays])
if warndays:
- cmd.extend(['--warndays', warndays])
+ cmd.extend(["--warndays", warndays])
cmd.append(username)
subprocess.check_call(cmd)
-remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
+remove_password_expiry = functools.partial(
+ chage, expiredate="-1", inactive="-1", mindays="0", maxdays="-1"
+)
-def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
+def rsync(from_path, to_path, flags="-r", options=None, timeout=None):
"""Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
+ options = options or ["--delete", "--executability"]
+ cmd = ["/usr/bin/rsync", flags]
if timeout:
- cmd = ['timeout', str(timeout)] + cmd
+ cmd = ["timeout", str(timeout)] + cmd
cmd.extend(options)
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
- return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ return (
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("UTF-8").strip()
+ )
def symlink(source, destination):
"""Create a symbolic link"""
log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
+ cmd = ["ln", "-sf", source, destination]
subprocess.check_call(cmd)
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
+def mkdir(path, owner="root", group="root", perms=0o555, force=False):
"""Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
+ log("Making dir {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
@@ -533,7 +545,7 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
os.chmod(realpath, perms)
-def write_file(path, content, owner='root', group='root', perms=0o444):
+def write_file(path, content, owner="root", group="root", perms=0o444):
"""Create or overwrite a file with the contents of a byte string."""
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
@@ -542,37 +554,50 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
existing_content = None
existing_uid, existing_gid, existing_perms = None, None, None
try:
- with open(path, 'rb') as target:
+ with open(path, "rb") as target:
existing_content = target.read()
stat = os.stat(path)
existing_uid, existing_gid, existing_perms = (
- stat.st_uid, stat.st_gid, stat.st_mode
+ stat.st_uid,
+ stat.st_gid,
+ stat.st_mode,
)
except Exception:
pass
if content != existing_content:
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
- level=DEBUG)
- with open(path, 'wb') as target:
+ log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), level=DEBUG)
+ with open(path, "wb") as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
if six.PY3 and isinstance(content, six.string_types):
- content = content.encode('UTF-8')
+ content = content.encode("UTF-8")
target.write(content)
return
# the contents were the same, but we might still need to change the
# ownership or permissions.
if existing_uid != uid:
- log("Changing uid on already existing content: {} -> {}"
- .format(existing_uid, uid), level=DEBUG)
+ log(
+ "Changing uid on already existing content: {} -> {}".format(
+ existing_uid, uid
+ ),
+ level=DEBUG,
+ )
os.chown(path, uid, -1)
if existing_gid != gid:
- log("Changing gid on already existing content: {} -> {}"
- .format(existing_gid, gid), level=DEBUG)
+ log(
+ "Changing gid on already existing content: {} -> {}".format(
+ existing_gid, gid
+ ),
+ level=DEBUG,
+ )
os.chown(path, -1, gid)
if existing_perms != perms:
- log("Changing permissions on existing content: {} -> {}"
- .format(existing_perms, perms), level=DEBUG)
+ log(
+ "Changing permissions on existing content: {} -> {}".format(
+ existing_perms, perms
+ ),
+ level=DEBUG,
+ )
os.chmod(path, perms)
@@ -588,14 +613,14 @@ def fstab_add(dev, mp, fs, options=None):
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
"""Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
+ cmd_args = ["mount"]
if options is not None:
- cmd_args.extend(['-o', options])
+ cmd_args.extend(["-o", options])
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
+ log("Error mounting {} at {}\n{}".format(device, mountpoint, e.output))
return False
if persist:
@@ -605,11 +630,11 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
def umount(mountpoint, persist=False):
"""Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
+ cmd_args = ["umount", mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+ log("Error unmounting {}\n{}".format(mountpoint, e.output))
return False
if persist:
@@ -619,25 +644,24 @@ def umount(mountpoint, persist=False):
def mounts():
"""Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
+ with open("/proc/mounts") as f:
# [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
+ system_mounts = [m[1::-1] for m in [l.strip().split() for l in f.readlines()]]
return system_mounts
def fstab_mount(mountpoint):
"""Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
+ cmd_args = ["mount", mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+ log("Error unmounting {}\n{}".format(mountpoint, e.output))
return False
return True
-def file_hash(path, hash_type='md5'):
+def file_hash(path, hash_type="md5"):
"""Generate a hash checksum of the contents of 'path' or None if not found.
:param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
@@ -645,7 +669,7 @@ def file_hash(path, hash_type='md5'):
"""
if os.path.exists(path):
h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
+ with open(path, "rb") as source:
h.update(source.read())
return h.hexdigest()
else:
@@ -660,13 +684,10 @@ def path_hash(path):
:return: dict: A { filename: hash } dictionary for all matched files.
Empty if none found.
"""
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
+ return {filename: file_hash(filename) for filename in glob.iglob(path)}
-def check_hash(path, checksum, hash_type='md5'):
+def check_hash(path, checksum, hash_type="md5"):
"""Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
@@ -683,6 +704,7 @@ def check_hash(path, checksum, hash_type='md5'):
class ChecksumError(ValueError):
"""A class derived from Value error to indicate the checksum failed."""
+
pass
@@ -711,18 +733,22 @@ def restart_on_change(restart_map, stopstart=False, restart_functions=None):
{svc: func, ...}
@returns result from decorated function
"""
+
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
+ (lambda: f(*args, **kwargs)), restart_map, stopstart, restart_functions
+ )
+
return wrapped_f
+
return wrap
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
- restart_functions=None):
+def restart_on_change_helper(
+ lambda_f, restart_map, stopstart=False, restart_functions=None
+):
"""Helper function to perform the restart_on_change function.
This is provided for decorators to restart services if files described
@@ -740,13 +766,13 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
checksums = {path: path_hash(path) for path in restart_map}
r = lambda_f()
# create a list of lists of the services to restart
- restarts = [restart_map[path]
- for path in restart_map
- if path_hash(path) != checksums[path]]
+ restarts = [
+ restart_map[path] for path in restart_map if path_hash(path) != checksums[path]
+ ]
# create a flat list of ordered services without duplicates from lists
services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
if services_list:
- actions = ('stop', 'start') if stopstart else ('restart',)
+ actions = ("stop", "start") if stopstart else ("restart",)
for service_name in services_list:
if service_name in restart_functions:
restart_functions[service_name](service_name)
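A sketch of the decorator form (the path and service name are hypothetical):

    from charmhelpers.core.host import restart_on_change, write_file

    @restart_on_change({"/etc/nagios3/nagios.cfg": ["nagios3"]})
    def write_config(rendered):
        # nagios3 is restarted only if this file's hash changes
        write_file("/etc/nagios3/nagios.cfg", rendered)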
@@ -762,23 +788,22 @@ def pwgen(length=None):
# A random length is ok to use a weak PRNG
length = random.choice(range(35, 45))
alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
+ l for l in (string.ascii_letters + string.digits) if l not in "l0QD1vAEIOUaeiou"
+ ]
# Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
# actual password
random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
+ random_chars = [random_generator.choice(alphanumeric_chars) for _ in range(length)]
+ return "".join(random_chars)
def is_phy_iface(interface):
"""Returns True if interface is not virtual, otherwise False."""
if interface:
- sys_net = '/sys/class/net'
+ sys_net = "/sys/class/net"
if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
+ for iface in glob.glob(os.path.join(sys_net, "*")):
+ if "/virtual/" in os.path.realpath(iface):
continue
if interface == os.path.basename(iface):
@@ -793,16 +818,16 @@ def get_bond_master(interface):
NOTE: the provided interface is expected to be physical
"""
if interface:
- iface_path = '/sys/class/net/%s' % (interface)
+ iface_path = "/sys/class/net/%s" % (interface)
if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
+ if "/virtual/" in os.path.realpath(iface_path):
return None
- master = os.path.join(iface_path, 'master')
+ master = os.path.join(iface_path, "master")
if os.path.exists(master):
master = os.path.realpath(master)
# make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
+ if os.path.exists(os.path.join(master, "bonding")):
return os.path.basename(master)
return None
@@ -818,14 +843,15 @@ def list_nics(nic_type=None):
interfaces = []
if nic_type:
for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
+ cmd = ["ip", "addr", "show", "label", int_type + "*"]
+ ip_output = subprocess.check_output(cmd).decode("UTF-8")
+ ip_output = ip_output.split("\n")
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
+ matched = re.search(
+ ".*: (" + int_type + r"[0-9]+\.[0-9]+)@.*", line
+ )
if matched:
iface = matched.groups()[0]
else:
@@ -834,11 +860,11 @@ def list_nics(nic_type=None):
if iface not in interfaces:
interfaces.append(iface)
else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+ cmd = ["ip", "a"]
+ ip_output = subprocess.check_output(cmd).decode("UTF-8").split("\n")
ip_output = (line.strip() for line in ip_output if line)
- key = re.compile(r'^[0-9]+:\s+(.+):')
+ key = re.compile(r"^[0-9]+:\s+(.+):")
for line in ip_output:
matched = re.search(key, line)
if matched:
@@ -852,30 +878,30 @@ def list_nics(nic_type=None):
def set_nic_mtu(nic, mtu):
"""Set the Maximum Transmission Unit (MTU) on a network interface."""
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
+ cmd = ["ip", "link", "set", nic, "mtu", mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
"""Return the Maximum Transmission Unit (MTU) for a network interface."""
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+ cmd = ["ip", "addr", "show", nic]
+ ip_output = subprocess.check_output(cmd).decode("UTF-8").split("\n")
mtu = ""
for line in ip_output:
words = line.split()
- if 'mtu' in words:
+ if "mtu" in words:
mtu = words[words.index("mtu") + 1]
return mtu
def get_nic_hwaddr(nic):
"""Return the Media Access Control (MAC) for a network interface."""
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
+ cmd = ["ip", "-o", "-0", "addr", "show", nic]
+ ip_output = subprocess.check_output(cmd).decode("UTF-8")
hwaddr = ""
words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
+ if "link/ether" in words:
+ hwaddr = words[words.index("link/ether") + 1]
return hwaddr
@@ -955,17 +981,17 @@ def get_total_ram():
This is what is reported by the OS, and may be overcommitted when
there are multiple containers hosted on the same machine.
"""
- with open('/proc/meminfo', 'r') as f:
+ with open("/proc/meminfo", "r") as f:
for line in f.readlines():
if line:
key, value, unit = line.split()
- if key == 'MemTotal:':
- assert unit == 'kB', 'Unknown unit'
+ if key == "MemTotal:":
+ assert unit == "kB", "Unknown unit"
return int(value) * 1024 # Classic, not KiB.
raise NotImplementedError()
-UPSTART_CONTAINER_TYPE = '/run/container_type'
+UPSTART_CONTAINER_TYPE = "/run/container_type"
def is_container():
@@ -975,8 +1001,7 @@ def is_container():
"""
if init_is_systemd():
# Detect using systemd-detect-virt
- return subprocess.call(['systemd-detect-virt',
- '--container']) == 0
+ return subprocess.call(["systemd-detect-virt", "--container"]) == 0
else:
# Detect using upstart container file marker
return os.path.exists(UPSTART_CONTAINER_TYPE)
@@ -997,7 +1022,7 @@ def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
# the local system
return
- with open(updatedb_path, 'r+') as f_id:
+ with open(updatedb_path, "r+") as f_id:
updatedb_text = f_id.read()
output = updatedb(updatedb_text, path)
f_id.seek(0)
@@ -1009,11 +1034,11 @@ def updatedb(updatedb_text, new_path):
lines = [line for line in updatedb_text.split("\n")]
for i, line in enumerate(lines):
if line.startswith("PRUNEPATHS="):
- paths_line = line.split("=")[1].replace('"', '')
+ paths_line = line.split("=")[1].replace('"', "")
paths = paths_line.split(" ")
if new_path not in paths:
paths.append(new_path)
- lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
+ lines[i] = 'PRUNEPATHS="{}"'.format(" ".join(paths))
output = "\n".join(lines)
return output
@@ -1045,7 +1070,7 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
leader nodes which are often given priority.
@return: int Calculated time to wait for unit operation
"""
- unit_number = int(local_unit().split('/')[1])
+ unit_number = int(local_unit().split("/")[1])
calculated_wait_time = (unit_number % modulo) * wait
if non_zero_wait and calculated_wait_time == 0:
return modulo * wait
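Worked example: with modulo=3 and wait=30, units 0, 3, 6... wait 0s (or 90s when non_zero_wait=True), units 1, 4, 7... wait 30s, and units 2, 5, 8... wait 60s (assumes a Juju hook environment):

    import time

    from charmhelpers.core.host import modulo_distribution

    time.sleep(modulo_distribution(modulo=3, wait=30, non_zero_wait=True))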
@@ -1065,16 +1090,16 @@ def install_ca_cert(ca_cert, name=None):
if not ca_cert:
return
if not isinstance(ca_cert, bytes):
- ca_cert = ca_cert.encode('utf8')
+ ca_cert = ca_cert.encode("utf8")
if not name:
- name = 'juju-{}'.format(charm_name())
- cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
+ name = "juju-{}".format(charm_name())
+ cert_file = "/usr/local/share/ca-certificates/{}.crt".format(name)
new_hash = hashlib.md5(ca_cert).hexdigest()
if file_hash(cert_file) == new_hash:
return
log("Installing new CA cert at: {}".format(cert_file), level=INFO)
write_file(cert_file, ca_cert)
- subprocess.check_call(['update-ca-certificates', '--fresh'])
+ subprocess.check_call(["update-ca-certificates", "--fresh"])
def get_system_env(key, default=None):
@@ -1088,16 +1113,15 @@ def get_system_env(key, default=None):
:rtype: any
:raises: subprocess.CalledProcessError
"""
- env_file = '/etc/environment'
+ env_file = "/etc/environment"
# use the shell and env(1) to parse the global environments file. This is
# done to get the correct result even if the user has shell variable
# substitutions or other shell logic in that file.
output = subprocess.check_output(
- ['env', '-i', '/bin/bash', '-c',
- 'set -a && source {} && env'.format(env_file)],
- universal_newlines=True)
- for k, v in (line.split('=', 1)
- for line in output.splitlines() if '=' in line):
+ ["env", "-i", "/bin/bash", "-c", "set -a && source {} && env".format(env_file)],
+ universal_newlines=True,
+ )
+ for k, v in (line.split("=", 1) for line in output.splitlines() if "=" in line):
if k == key:
return v
else:
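Illustrative call (the key and default are examples):

    from charmhelpers.core.host import get_system_env

    path = get_system_env("PATH", default="/usr/bin:/bin")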
diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py
index 7781a39..8bba7fd 100644
--- a/hooks/charmhelpers/core/host_factory/centos.py
+++ b/hooks/charmhelpers/core/host_factory/centos.py
@@ -15,25 +15,24 @@ class CompareHostReleases(BasicStringComparator):
"""
def __init__(self, item):
- raise NotImplementedError(
- "CompareHostReleases() is not implemented for CentOS")
+ raise NotImplementedError("CompareHostReleases() is not implemented for CentOS")
def service_available(service_name):
# """Determine whether a system service is available."""
- if os.path.isdir('/run/systemd/system'):
- cmd = ['systemctl', 'is-enabled', service_name]
+ if os.path.isdir("/run/systemd/system"):
+ cmd = ["systemctl", "is-enabled", service_name]
else:
- cmd = ['service', service_name, 'is-enabled']
+ cmd = ["service", service_name, "is-enabled"]
return subprocess.call(cmd) == 0
def add_new_group(group_name, system_group=False, gid=None):
- cmd = ['groupadd']
+ cmd = ["groupadd"]
if gid:
- cmd.extend(['--gid', str(gid)])
+ cmd.extend(["--gid", str(gid)])
if system_group:
- cmd.append('-r')
+ cmd.append("-r")
cmd.append(group_name)
subprocess.check_call(cmd)
@@ -41,9 +40,9 @@ def add_new_group(group_name, system_group=False, gid=None):
def lsb_release():
"""Return /etc/os-release in a dict."""
d = {}
- with open('/etc/os-release', 'r') as lsb:
+ with open("/etc/os-release", "r") as lsb:
for l in lsb:
- s = l.split('=')
+ s = l.split("=")
if len(s) != 2:
continue
d[s[0].strip()] = s[1].strip()
@@ -63,7 +62,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
if not pkgcache:
y = yum.YumBase()
packages = y.doPackageLists()
- pkgcache = {i.Name: i.version for i in packages['installed']}
+ pkgcache = {i.Name: i.version for i in packages["installed"]}
pkg = pkgcache[package]
if pkg > revno:
return 1
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
index 3edc068..e9e41b0 100644
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -5,27 +5,27 @@ from charmhelpers.core.strutils import BasicStringComparator
UBUNTU_RELEASES = (
- 'lucid',
- 'maverick',
- 'natty',
- 'oneiric',
- 'precise',
- 'quantal',
- 'raring',
- 'saucy',
- 'trusty',
- 'utopic',
- 'vivid',
- 'wily',
- 'xenial',
- 'yakkety',
- 'zesty',
- 'artful',
- 'bionic',
- 'cosmic',
- 'disco',
- 'eoan',
- 'focal'
+ "lucid",
+ "maverick",
+ "natty",
+ "oneiric",
+ "precise",
+ "quantal",
+ "raring",
+ "saucy",
+ "trusty",
+ "utopic",
+ "vivid",
+ "wily",
+ "xenial",
+ "yakkety",
+ "zesty",
+ "artful",
+ "bionic",
+ "cosmic",
+ "disco",
+ "eoan",
+ "focal",
)
@@ -37,6 +37,7 @@ class CompareHostReleases(BasicStringComparator):
if CompareHostReleases(release) > 'trusty':
# do something for releases newer than trusty
"""
+
_list = UBUNTU_RELEASES
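Illustrative comparison (the codename threshold and follow-up are examples):

    from charmhelpers.core.host import CompareHostReleases, get_distrib_codename

    if CompareHostReleases(get_distrib_codename()) >= "bionic":
        pass  # take the newer-release code path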
@@ -44,24 +45,22 @@ def service_available(service_name):
"""Determine whether a system service is available"""
try:
subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
+ ["service", service_name, "status"], stderr=subprocess.STDOUT
+ ).decode("UTF-8")
except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
+ return b"unrecognized service" not in e.output
else:
return True
def add_new_group(group_name, system_group=False, gid=None):
- cmd = ['addgroup']
+ cmd = ["addgroup"]
if gid:
- cmd.extend(['--gid', str(gid)])
+ cmd.extend(["--gid", str(gid)])
if system_group:
- cmd.append('--system')
+ cmd.append("--system")
else:
- cmd.extend([
- '--group',
- ])
+ cmd.extend(["--group"])
cmd.append(group_name)
subprocess.check_call(cmd)
@@ -69,9 +68,9 @@ def add_new_group(group_name, system_group=False, gid=None):
def lsb_release():
"""Return /etc/lsb-release in a dict"""
d = {}
- with open('/etc/lsb-release', 'r') as lsb:
+ with open("/etc/lsb-release", "r") as lsb:
for l in lsb:
- k, v = l.split('=')
+ k, v = l.split("=")
d[k.strip()] = v.strip()
return d
@@ -81,7 +80,7 @@ def get_distrib_codename():
:returns: The codename
:rtype: str
"""
- return lsb_release()['DISTRIB_CODENAME'].lower()
+ return lsb_release()["DISTRIB_CODENAME"].lower()
def cmp_pkgrevno(package, revno, pkgcache=None):
@@ -96,8 +95,10 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
you call this function, or pass an apt_pkg.Cache() instance.
"""
from charmhelpers.fetch import apt_pkg
+
if not pkgcache:
from charmhelpers.fetch import apt_cache
+
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
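Illustrative check (package and version are examples); per apt_pkg.version_compare the result is positive, zero, or negative:

    from charmhelpers.core.host import cmp_pkgrevno

    if cmp_pkgrevno("nagios3", "3.5.1") >= 0:
        pass  # installed nagios3 is 3.5.1 or newer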
@@ -111,6 +112,8 @@ def arch():
:rtype: str
:raises: subprocess.CalledProcessError if dpkg command fails
"""
- return subprocess.check_output(
- ['dpkg', '--print-architecture']
- ).rstrip().decode('UTF-8')
+ return (
+ subprocess.check_output(["dpkg", "--print-architecture"])
+ .rstrip()
+ .decode("UTF-8")
+ )
diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py
index 54b5b5e..36b9161 100644
--- a/hooks/charmhelpers/core/hugepage.py
+++ b/hooks/charmhelpers/core/hugepage.py
@@ -17,19 +17,21 @@
import yaml
from charmhelpers.core import fstab
from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
+from charmhelpers.core.host import add_group, add_user_to_group, fstab_mount, mkdir
from charmhelpers.core.strutils import bytes_from_string
from subprocess import check_output
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
+def hugepage_support(
+ user,
+ group="hugetlb",
+ nr_hugepages=256,
+ max_map_count=65536,
+ mnt_point="/run/hugepages/kvm",
+ pagesize="2MB",
+ mount=True,
+ set_shmmax=False,
+):
"""Enable hugepages on system.
Args:
@@ -47,23 +49,29 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256,
if max_map_count < 2 * nr_hugepages:
max_map_count = 2 * nr_hugepages
sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
+ "vm.nr_hugepages": nr_hugepages,
+ "vm.max_map_count": max_map_count,
+ "vm.hugetlb_shm_group": gid,
}
if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
+ shmmax_current = int(check_output(["sysctl", "-n", "kernel.shmmax"]))
shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
+ sysctl_settings["kernel.shmmax"] = shmmax_minsize
+ sysctl.create(yaml.dump(sysctl_settings), "/etc/sysctl.d/10-hugepage.conf")
+ mkdir(mnt_point, owner="root", group="root", perms=0o755, force=False)
lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
+ fstab_entry = lfstab.get_entry_by_attr("mountpoint", mnt_point)
if fstab_entry:
lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
+ entry = lfstab.Entry(
+ "nodev",
+ mnt_point,
+ "hugetlbfs",
+ "mode=1770,gid={},pagesize={}".format(gid, pagesize),
+ 0,
+ 0,
+ )
lfstab.add_entry(entry)
if mount:
fstab_mount(mnt_point)
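Illustrative invocation (user, group, and sizing are examples only):

    from charmhelpers.core.hugepage import hugepage_support

    hugepage_support(
        "libvirt-qemu",
        group="kvm",
        nr_hugepages=512,
        mnt_point="/run/hugepages/kvm",
        pagesize="2MB",
    )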
diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py
index e01f4f8..e550b06 100644
--- a/hooks/charmhelpers/core/kernel.py
+++ b/hooks/charmhelpers/core/kernel.py
@@ -19,10 +19,7 @@ import re
import subprocess
from charmhelpers.osplatform import get_platform
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
+from charmhelpers.core.hookenv import log, INFO
__platform__ = get_platform()
if __platform__ == "ubuntu":
@@ -41,9 +38,9 @@ __author__ = "Jorge Niedbalski <jorge.niedbalski@xxxxxxxxxxxxx>"
def modprobe(module, persist=True):
"""Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
+ cmd = ["modprobe", module]
- log('Loading kernel module %s' % module, level=INFO)
+ log("Loading kernel module %s" % module, level=INFO)
subprocess.check_call(cmd)
if persist:
@@ -52,21 +49,20 @@ def modprobe(module, persist=True):
def rmmod(module, force=False):
"""Remove a module from the linux kernel"""
- cmd = ['rmmod']
+ cmd = ["rmmod"]
if force:
- cmd.append('-f')
+ cmd.append("-f")
cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
+ log("Removing kernel module %s" % module, level=INFO)
return subprocess.check_call(cmd)
def lsmod():
"""Shows what kernel modules are currently loaded"""
- return subprocess.check_output(['lsmod'],
- universal_newlines=True)
+ return subprocess.check_output(["lsmod"], universal_newlines=True)
def is_module_loaded(module):
"""Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+ matches = re.findall("^%s[ ]+" % module, lsmod(), re.M)
return len(matches) > 0
diff --git a/hooks/charmhelpers/core/kernel_factory/centos.py b/hooks/charmhelpers/core/kernel_factory/centos.py
index 1c402c1..c789df8 100644
--- a/hooks/charmhelpers/core/kernel_factory/centos.py
+++ b/hooks/charmhelpers/core/kernel_factory/centos.py
@@ -4,14 +4,14 @@ import os
def persistent_modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
- if not os.path.exists('/etc/rc.modules'):
- open('/etc/rc.modules', 'a')
- os.chmod('/etc/rc.modules', 111)
- with open('/etc/rc.modules', 'r+') as modules:
+ if not os.path.exists("/etc/rc.modules"):
+ open("/etc/rc.modules", "a")
+ os.chmod("/etc/rc.modules", 111)
+ with open("/etc/rc.modules", "r+") as modules:
if module not in modules.read():
- modules.write('modprobe %s\n' % module)
+ modules.write("modprobe %s\n" % module)
-def update_initramfs(version='all'):
+def update_initramfs(version="all"):
"""Updates an initramfs image."""
return subprocess.check_call(["dracut", "-f", version])
diff --git a/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/hooks/charmhelpers/core/kernel_factory/ubuntu.py
index 3de372f..49c680c 100644
--- a/hooks/charmhelpers/core/kernel_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/kernel_factory/ubuntu.py
@@ -3,11 +3,11 @@ import subprocess
def persistent_modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
- with open('/etc/modules', 'r+') as modules:
+ with open("/etc/modules", "r+") as modules:
if module not in modules.read():
modules.write(module + "\n")
-def update_initramfs(version='all'):
+def update_initramfs(version="all"):
"""Updates an initramfs image."""
return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py
index 179ad4f..ee6b5c7 100644
--- a/hooks/charmhelpers/core/services/base.py
+++ b/hooks/charmhelpers/core/services/base.py
@@ -21,9 +21,16 @@ from charmhelpers.core import host
from charmhelpers.core import hookenv
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
+__all__ = [
+ "ServiceManager",
+ "ManagerCallback",
+ "PortManagerCallback",
+ "open_ports",
+ "close_ports",
+ "manage_ports",
+ "service_restart",
+ "service_stop",
+]
class ServiceManager(object):
@@ -115,11 +122,11 @@ class ServiceManager(object):
])
manager.manage()
"""
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+ self._ready_file = os.path.join(hookenv.charm_dir(), "READY-SERVICES.json")
self._ready = None
self.services = OrderedDict()
for service in services or []:
- service_name = service['service']
+ service_name = service["service"]
self.services[service_name] = service
def manage(self):
@@ -129,7 +136,7 @@ class ServiceManager(object):
hookenv._run_atstart()
try:
hook_name = hookenv.hook_name()
- if hook_name == 'stop':
+ if hook_name == "stop":
self.stop_services()
else:
self.reconfigure_services()
@@ -163,12 +170,12 @@ class ServiceManager(object):
"""
for service_name, service in self.services.items():
service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
+ for provider in service.get("provided_data", []):
for relid in hookenv.relation_ids(provider.name):
units = hookenv.related_units(relid)
if not units:
continue
- remote_service = units[0].split('/')[0]
+ remote_service = units[0].split("/")[0]
argspec = getargspec(provider.provide_data)
if len(argspec.args) > 1:
data = provider.provide_data(remote_service, service_ready)
@@ -186,17 +193,17 @@ class ServiceManager(object):
"""
for service_name in service_names or self.services.keys():
if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
+ self.fire_event("data_ready", service_name)
+ self.fire_event(
+ "start", service_name, default=[service_restart, manage_ports]
+ )
self.save_ready(service_name)
else:
if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
+ self.fire_event("data_lost", service_name)
+ self.fire_event(
+ "stop", service_name, default=[manage_ports, service_stop]
+ )
self.save_lost(service_name)
def stop_services(self, *service_names):
@@ -206,9 +213,7 @@ class ServiceManager(object):
If no service names are given, stops all registered services.
"""
for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
+ self.fire_event("stop", service_name, default=[manage_ports, service_stop])
def get_service(self, service_name):
"""
@@ -216,7 +221,7 @@ class ServiceManager(object):
"""
service = self.services.get(service_name)
if not service:
- raise KeyError('Service not registered: %s' % service_name)
+ raise KeyError("Service not registered: %s" % service_name)
return service
def fire_event(self, event_name, service_name, default=None):
@@ -243,7 +248,7 @@ class ServiceManager(object):
if `bool(item)` evaluates as True.
"""
service = self.get_service(service_name)
- reqs = service.get('required_data', [])
+ reqs = service.get("required_data", [])
return all(bool(req) for req in reqs)
def _load_ready_file(self):
@@ -258,7 +263,7 @@ class ServiceManager(object):
def _save_ready_file(self):
if self._ready is None:
return
- with open(self._ready_file, 'w') as fp:
+ with open(self._ready_file, "w") as fp:
json.dump(list(self._ready), fp)
def save_ready(self, service_name):
@@ -296,6 +301,7 @@ class ManagerCallback(object):
* `service_name` The name of the service it's being triggered for
* `event_name` The name of the event that this callback is handling
"""
+
def __call__(self, manager, service_name, event_name):
raise NotImplementedError()
@@ -305,34 +311,35 @@ class PortManagerCallback(ManagerCallback):
Callback class that will open or close ports, for use as either
a start or stop action.
"""
+
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
# turn this generator into a list,
# as we'll be going over it multiple times
- new_ports = list(service.get('ports', []))
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
+ new_ports = list(service.get("ports", []))
+ port_file = os.path.join(hookenv.charm_dir(), ".{}.ports".format(service_name))
if os.path.exists(port_file):
with open(port_file) as fp:
- old_ports = fp.read().split(',')
+ old_ports = fp.read().split(",")
for old_port in old_ports:
if bool(old_port) and not self.ports_contains(old_port, new_ports):
hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
+ with open(port_file, "w") as fp:
+ fp.write(",".join(str(port) for port in new_ports))
for port in new_ports:
# A port is either a number or 'ICMP'
- protocol = 'TCP'
- if str(port).upper() == 'ICMP':
- protocol = 'ICMP'
- if event_name == 'start':
+ protocol = "TCP"
+ if str(port).upper() == "ICMP":
+ protocol = "ICMP"
+ if event_name == "start":
hookenv.open_port(port, protocol)
- elif event_name == 'stop':
+ elif event_name == "stop":
hookenv.close_port(port, protocol)
def ports_contains(self, port, ports):
if not bool(port):
return False
- if str(port).upper() != 'ICMP':
+ if str(port).upper() != "ICMP":
port = int(port)
return port in ports
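For context, a minimal ServiceManager definition in the style of the docstring above; the service name, ports, relation and template are all hypothetical:

    from charmhelpers.core.services.base import ServiceManager
    from charmhelpers.core.services.helpers import HttpRelation, render_template

    manager = ServiceManager([
        {
            "service": "my-daemon",             # init service restarted on changes
            "ports": [80, "ICMP"],              # opened on start, closed on stop
            "required_data": [HttpRelation()],  # start only once all are truthy
            "data_ready": [
                render_template(source="my-daemon.conf.j2",
                                target="/etc/my-daemon.conf"),
            ],
        },
    ])
    manager.manage()  # dispatches on hookenv.hook_name()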
diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py
index 3e6e30d..849d84b 100644
--- a/hooks/charmhelpers/core/services/helpers.py
+++ b/hooks/charmhelpers/core/services/helpers.py
@@ -22,8 +22,7 @@ from charmhelpers.core import templating
from charmhelpers.core.services.base import ManagerCallback
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
+__all__ = ["RelationContext", "TemplateCallback", "render_template", "template"]
class RelationContext(dict):
@@ -43,11 +42,12 @@ class RelationContext(dict):
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
+
name = None
interface = None
def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
+ if not hasattr(self, "required_keys"):
self.required_keys = []
if name is not None:
@@ -73,7 +73,9 @@ class RelationContext(dict):
"""
ready = len(self.get(self.name, [])) > 0
if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+ hookenv.log(
+ "Incomplete relation: {}".format(self.__class__.__name__), hookenv.DEBUG
+ )
return ready
def _is_ready(self, unit_data):
@@ -134,11 +136,12 @@ class MysqlRelation(RelationContext):
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
- name = 'db'
- interface = 'mysql'
+
+ name = "db"
+ interface = "mysql"
def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
+ self.required_keys = ["host", "user", "password", "database"]
RelationContext.__init__(self, *args, **kwargs)
@@ -149,18 +152,16 @@ class HttpRelation(RelationContext):
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
- name = 'website'
- interface = 'http'
+
+ name = "website"
+ interface = "http"
def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
+ self.required_keys = ["host", "port"]
RelationContext.__init__(self, *args, **kwargs)
def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
+ return {"host": hookenv.unit_get("private-address"), "port": 80}
class RequiredConfig(dict):
@@ -177,19 +178,19 @@ class RequiredConfig(dict):
def __init__(self, *args):
self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
+ self["config"] = hookenv.config()
+ with open(os.path.join(hookenv.charm_dir(), "config.yaml")) as fp:
+ self.config = yaml.load(fp).get("options", {})
def __bool__(self):
for option in self.required_options:
- if option not in self['config']:
+ if option not in self["config"]:
return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
+ current_value = self["config"][option]
+ default_value = self.config[option].get("default")
if current_value == default_value:
return False
- if current_value in (None, '') and default_value in (None, ''):
+ if current_value in (None, "") and default_value in (None, ""):
return False
return True
@@ -205,6 +206,7 @@ class StoredContext(dict):
will thereafter use the same value that was originally generated, instead
of generating a new value each time it is run.
"""
+
def __init__(self, file_name, config_data):
"""
If the file exists, populate `self` with the data from the file.
@@ -219,14 +221,14 @@ class StoredContext(dict):
def store_context(self, file_name, config_data):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
+ with open(file_name, "w") as file_stream:
os.fchmod(file_stream.fileno(), 0o600)
yaml.dump(config_data, file_stream)
def read_context(self, file_name):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
+ with open(file_name, "r") as file_stream:
data = yaml.load(file_stream)
if not data:
raise OSError("%s is empty" % file_name)
@@ -251,9 +253,17 @@ class TemplateCallback(ManagerCallback):
:return str: The rendered template
"""
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None, template_loader=None):
+
+ def __init__(
+ self,
+ source,
+ target,
+ owner="root",
+ group="root",
+ perms=0o444,
+ on_change_action=None,
+ template_loader=None,
+ ):
self.source = source
self.target = target
self.owner = owner
@@ -263,23 +273,27 @@ class TemplateCallback(ManagerCallback):
self.template_loader = template_loader
def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
+ pre_checksum = ""
if self.on_change_action and os.path.isfile(self.target):
pre_checksum = host.file_hash(self.target)
service = manager.get_service(service_name)
- context = {'ctx': {}}
- for ctx in service.get('required_data', []):
+ context = {"ctx": {}}
+ for ctx in service.get("required_data", []):
context.update(ctx)
- context['ctx'].update(ctx)
-
- result = templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms,
- template_loader=self.template_loader)
+ context["ctx"].update(ctx)
+
+ result = templating.render(
+ self.source,
+ self.target,
+ context,
+ self.owner,
+ self.group,
+ self.perms,
+ template_loader=self.template_loader,
+ )
if self.on_change_action:
if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
+ hookenv.log("No change detected: {}".format(self.target), hookenv.DEBUG)
else:
self.on_change_action()
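A sketch of StoredContext's generate-once semantics (file name and value are hypothetical):

    from charmhelpers.core.services.helpers import StoredContext

    # First run: persists the mapping to $CHARM_DIR/my-charm.yaml (0600)
    # and populates the dict; later runs read the stored value back, so
    # the same secret survives across hook executions.
    creds = StoredContext("my-charm.yaml", {"admin_password": "change-me"})
    print(creds["admin_password"])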
diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py
index e8df045..5288018 100644
--- a/hooks/charmhelpers/core/strutils.py
+++ b/hooks/charmhelpers/core/strutils.py
@@ -32,9 +32,9 @@ def bool_from_string(value):
value = value.strip().lower()
- if value in ['y', 'yes', 'true', 't', 'on']:
+ if value in ["y", "yes", "true", "t", "on"]:
return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
+ elif value in ["n", "no", "false", "f", "off"]:
return False
msg = "Unable to interpret string value '%s' as boolean" % (value)
@@ -47,16 +47,16 @@ def bytes_from_string(value):
Returns int
"""
BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
+ "K": 1,
+ "KB": 1,
+ "M": 2,
+ "MB": 2,
+ "G": 3,
+ "GB": 3,
+ "T": 4,
+ "TB": 4,
+ "P": 5,
+ "PB": 5,
}
if isinstance(value, six.string_types):
value = six.text_type(value)
@@ -91,8 +91,7 @@ class BasicStringComparator(object):
try:
self.index = self._list.index(item)
except Exception:
- raise KeyError("Item '{}' is not in list '{}'"
- .format(item, self._list))
+ raise KeyError("Item '{}' is not in list '{}'".format(item, self._list))
def __eq__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
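The two converters above behave as follows; the suffixes in BYTE_POWER map to powers of 1024:

    from charmhelpers.core.strutils import bool_from_string, bytes_from_string

    assert bool_from_string("Yes") is True      # matching is case-insensitive
    assert bool_from_string("off") is False

    assert bytes_from_string("1G") == 1024 ** 3
    assert bytes_from_string("512M") == 512 * 1024 ** 2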
diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py
index 386428d..93f1175 100644
--- a/hooks/charmhelpers/core/sysctl.py
+++ b/hooks/charmhelpers/core/sysctl.py
@@ -19,16 +19,11 @@ import yaml
from subprocess import check_call, CalledProcessError
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
- WARNING,
-)
+from charmhelpers.core.hookenv import log, DEBUG, ERROR, WARNING
from charmhelpers.core.host import is_container
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@xxxxxxxxxxxxx>'
+__author__ = "Jorge Niedbalski R. <jorge.niedbalski@xxxxxxxxxxxxx>"
def create(sysctl_dict, sysctl_file, ignore=False):
@@ -47,8 +42,7 @@ def create(sysctl_dict, sysctl_file, ignore=False):
try:
sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
+ log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), level=ERROR)
return
else:
sysctl_dict_parsed = sysctl_dict
@@ -57,9 +51,10 @@ def create(sysctl_dict, sysctl_file, ignore=False):
for key, value in sysctl_dict_parsed.items():
fd.write("{}={}\n".format(key, value))
- log("Updating sysctl_file: {} values: {}".format(sysctl_file,
- sysctl_dict_parsed),
- level=DEBUG)
+ log(
+ "Updating sysctl_file: {} values: {}".format(sysctl_file, sysctl_dict_parsed),
+ level=DEBUG,
+ )
call = ["sysctl", "-p", sysctl_file]
if ignore:
@@ -69,7 +64,9 @@ def create(sysctl_dict, sysctl_file, ignore=False):
check_call(call)
except CalledProcessError as e:
if is_container():
- log("Error setting some sysctl keys in this container: {}".format(e.output),
- level=WARNING)
+ log(
+ "Error setting some sysctl keys in this container: {}".format(e.output),
+ level=WARNING,
+ )
else:
raise e
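A typical call into create(), with a hypothetical sysctl.d path; ignore=True appends sysctl's -e flag so unsettable keys are tolerated (useful in containers, per the warning branch above):

    from charmhelpers.core.sysctl import create

    create(
        sysctl_dict="{net.core.somaxconn: 4096, vm.swappiness: 10}",
        sysctl_file="/etc/sysctl.d/50-my-charm.conf",
        ignore=True,
    )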
diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py
index 9014015..3a7dd06 100644
--- a/hooks/charmhelpers/core/templating.py
+++ b/hooks/charmhelpers/core/templating.py
@@ -19,9 +19,18 @@ from charmhelpers.core import host
from charmhelpers.core import hookenv
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8',
- template_loader=None, config_template=None):
+def render(
+ source,
+ target,
+ context,
+ owner="root",
+ group="root",
+ perms=0o444,
+ templates_dir=None,
+ encoding="UTF-8",
+ template_loader=None,
+ config_template=None,
+):
"""
Render a template.
@@ -53,21 +62,23 @@ def render(source, target, context, owner='root', group='root',
try:
from charmhelpers.fetch import apt_install
except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
+ hookenv.log(
+ "Could not import jinja2, and could not import "
+ "charmhelpers.fetch to install it",
+ level=hookenv.ERROR,
+ )
raise
if sys.version_info.major == 2:
- apt_install('python-jinja2', fatal=True)
+ apt_install("python-jinja2", fatal=True)
else:
- apt_install('python3-jinja2', fatal=True)
+ apt_install("python3-jinja2", fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if template_loader:
template_env = Environment(loader=template_loader)
else:
if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+ templates_dir = os.path.join(hookenv.charm_dir(), "templates")
template_env = Environment(loader=FileSystemLoader(templates_dir))
# load from a string if provided explicitly
@@ -78,9 +89,10 @@ def render(source, target, context, owner='root', group='root',
source = source
template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
+ hookenv.log(
+ "Could not load template %s from %s." % (source, templates_dir),
+ level=hookenv.ERROR,
+ )
raise e
content = template.render(context)
if target is not None:
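A minimal render() call (template name, target and context are placeholders):

    from charmhelpers.core.templating import render

    # Looks up templates/nagios.cfg.j2 under the charm directory and
    # writes the rendered result as root:root 0444.
    render(
        source="nagios.cfg.j2",
        target="/etc/nagios/nagios.cfg",
        context={"admin_email": "root@localhost"},
    )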
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
index ab55432..a33f7fe 100644
--- a/hooks/charmhelpers/core/unitdata.py
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -156,7 +156,7 @@ import pprint
import sqlite3
import sys
-__author__ = 'Kapil Thangavelu <kapil.foss@xxxxxxxxx>'
+__author__ = "Kapil Thangavelu <kapil.foss@xxxxxxxxx>"
class Storage(object):
@@ -171,18 +171,20 @@ class Storage(object):
path parameter which causes sqlite3 to only build the db in memory.
This should only be used for testing purposes.
"""
+
def __init__(self, path=None):
self.db_path = path
if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
+ if "UNIT_STATE_DB" in os.environ:
+ self.db_path = os.environ["UNIT_STATE_DB"]
else:
self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- if self.db_path != ':memory:':
- with open(self.db_path, 'a') as f:
+ os.environ.get("CHARM_DIR", ""), ".unit-state.db"
+ )
+ if self.db_path != ":memory:":
+ with open(self.db_path, "a") as f:
os.fchmod(f.fileno(), 0o600)
- self.conn = sqlite3.connect('%s' % self.db_path)
+ self.conn = sqlite3.connect("%s" % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
self._closed = False
@@ -197,7 +199,7 @@ class Storage(object):
self._closed = True
def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
+ self.cursor.execute("select data from kv where key=?", [key])
result = self.cursor.fetchone()
if not result:
return default
@@ -215,16 +217,16 @@ class Storage(object):
names in the returned dict
:return dict: A (possibly empty) dict of key-value mappings
"""
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
+ self.cursor.execute(
+ "select key, data from kv where key like ?", ["%s%%" % key_prefix]
+ )
result = self.cursor.fetchall()
if not result:
return {}
if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
+ key_prefix = ""
+ return dict([(k[len(key_prefix) :], json.loads(v)) for k, v in result])
def update(self, mapping, prefix=""):
"""
@@ -241,11 +243,12 @@ class Storage(object):
"""
Remove a key from the database entirely.
"""
- self.cursor.execute('delete from kv where key=?', [key])
+ self.cursor.execute("delete from kv where key=?", [key])
if self.revision and self.cursor.rowcount:
self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
+ "insert into kv_revisions values (?, ?, ?)",
+ [key, self.revision, json.dumps("DELETED")],
+ )
def unsetrange(self, keys=None, prefix=""):
"""
@@ -257,19 +260,27 @@ class Storage(object):
before removing.
"""
if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
+ keys = ["%s%s" % (prefix, key) for key in keys]
+ self.cursor.execute(
+ "delete from kv where key in (%s)" % ",".join(["?"] * len(keys)), keys
+ )
if self.revision and self.cursor.rowcount:
self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
+ "insert into kv_revisions values %s"
+ % ",".join(["(?, ?, ?)"] * len(keys)),
+ list(
+ itertools.chain.from_iterable(
+ (key, self.revision, json.dumps("DELETED")) for key in keys
+ )
+ ),
+ )
else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
+ self.cursor.execute("delete from kv where key like ?", ["%s%%" % prefix])
if self.revision and self.cursor.rowcount:
self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
+ "insert into kv_revisions values (?, ?, ?)",
+ ["%s%%" % prefix, self.revision, json.dumps("DELETED")],
+ )
def set(self, key, value):
"""
@@ -280,7 +291,7 @@ class Storage(object):
"""
serialized = json.dumps(value)
- self.cursor.execute('select data from kv where key=?', [key])
+ self.cursor.execute("select data from kv where key=?", [key])
exists = self.cursor.fetchone()
# Skip mutations to the same value
@@ -290,36 +301,42 @@ class Storage(object):
if not exists:
self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
+ "insert into kv (key, data) values (?, ?)", (key, serialized)
+ )
else:
- self.cursor.execute('''
+ self.cursor.execute(
+ """
update kv
set data = ?
- where key = ?''', [serialized, key])
+ where key = ?""",
+ [serialized, key],
+ )
# Save
if not self.revision:
return value
self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
+ "select 1 from kv_revisions where key=? and revision=?",
+ [key, self.revision],
+ )
exists = self.cursor.fetchone()
if not exists:
self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
+ """insert into kv_revisions (
+ revision, key, data) values (?, ?, ?)""",
+ (self.revision, key, serialized),
+ )
else:
self.cursor.execute(
- '''
+ """
update kv_revisions
set data = ?
where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
+ and revision = ?""",
+ [serialized, key, self.revision],
+ )
return value
@@ -358,9 +375,9 @@ class Storage(object):
revision."""
assert not self.revision
self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
+ "insert into hooks (hook, date) values (?, ?)",
+ (name or sys.argv[0], datetime.datetime.utcnow().isoformat()),
+ )
self.revision = self.cursor.lastrowid
try:
yield self.revision
@@ -381,50 +398,63 @@ class Storage(object):
self.conn.rollback()
def _init(self):
- self.cursor.execute('''
+ self.cursor.execute(
+ """
create table if not exists kv (
key text,
data text,
primary key (key)
- )''')
- self.cursor.execute('''
+ )"""
+ )
+ self.cursor.execute(
+ """
create table if not exists kv_revisions (
key text,
revision integer,
data text,
primary key (key, revision)
- )''')
- self.cursor.execute('''
+ )"""
+ )
+ self.cursor.execute(
+ """
create table if not exists hooks (
version integer primary key autoincrement,
hook text,
date text
- )''')
+ )"""
+ )
self.conn.commit()
def gethistory(self, key, deserialize=False):
self.cursor.execute(
- '''
+ """
select kv.revision, kv.key, kv.data, h.hook, h.date
from kv_revisions kv,
hooks h
where kv.key=?
and kv.revision = h.version
- ''', [key])
+ """,
+ [key],
+ )
if deserialize is False:
return self.cursor.fetchall()
return map(_parse_history, self.cursor.fetchall())
def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
+ self.cursor.execute("select * from kv")
pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
+ self.cursor.execute("select * from kv_revisions")
pprint.pprint(self.cursor.fetchall(), stream=fh)
def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
+ return (
+ d[0],
+ d[1],
+ json.loads(d[2]),
+ d[3],
+ datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"),
+ )
class HookData(object):
@@ -459,6 +489,7 @@ class HookData(object):
hook.execute()
"""
+
def __init__(self):
self.kv = kv()
self.conf = None
@@ -467,6 +498,7 @@ class HookData(object):
@contextlib.contextmanager
def __call__(self):
from charmhelpers.core import hookenv
+
hook_name = hookenv.hook_name()
with self.kv.hook_scope(hook_name):
@@ -479,21 +511,20 @@ class HookData(object):
# to charm authors as they don't control the revision.
# so logic dependent on revision is not particularly
# useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
+ charm_rev = open(os.path.join(charm_dir, "revision")).read().strip()
+ charm_rev = charm_rev or "0"
+ revs = self.kv.get("charm_revisions", [])
if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
+ revs.append(charm_rev.strip() or "0")
+ self.kv.set("charm_revisions", revs)
def _record_hook(self, hookenv):
data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
+ self.conf = conf_delta = self.kv.delta(data["conf"], "config")
+ self.rels = rels_delta = self.kv.delta(data["rels"], "rels")
+ self.kv.set("env", dict(data["env"]))
+ self.kv.set("unit", data["unit"])
+ self.kv.set("relid", data.get("relid"))
return conf_delta, rels_delta
@@ -512,7 +543,7 @@ class DeltaSet(Record):
__slots__ = ()
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
+Delta = collections.namedtuple("Delta", ["previous", "current"])
_KV = None
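The Storage/kv API above in brief; key names are illustrative:

    from charmhelpers.core import unitdata

    db = unitdata.kv()  # module-level Storage singleton
    db.set("seen-units", ["unit/0", "unit/1"])
    units = db.get("seen-units", default=[])

    # Writes inside hook_scope() commit together and are recorded
    # against a revision row in the hooks table shown above.
    with db.hook_scope("config-changed"):
        db.set("config-hash", "abc123")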
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
index 0cc7fc8..5836bd7 100644
--- a/hooks/charmhelpers/fetch/__init__.py
+++ b/hooks/charmhelpers/fetch/__init__.py
@@ -15,12 +15,10 @@
import importlib
from charmhelpers.osplatform import get_platform
from yaml import safe_load
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
+from charmhelpers.core.hookenv import config, log
import six
+
if six.PY3:
from urllib.parse import urlparse, urlunparse
else:
@@ -30,9 +28,9 @@ else:
# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
+ "charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler",
+ "charmhelpers.fetch.bzrurl.BzrUrlFetchHandler",
+ "charmhelpers.fetch.giturl.GitUrlFetchHandler",
)
@@ -52,6 +50,7 @@ class GPGKeyError(Exception):
"""Exception occurs when a GPG key cannot be fetched or used. The message
indicates what the problem is.
"""
+
pass
@@ -75,7 +74,7 @@ class BaseFetchHandler(object):
def base_url(self, url):
"""Return url without querystring or fragment"""
parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
+ parts[4:] = ["" for i in parts[4:]]
return urlunparse(parts)
@@ -109,9 +108,9 @@ elif __platform__ == "centos":
yum_search = fetch.yum_search
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
+def configure_sources(
+ update=False, sources_var="install_sources", keys_var="install_keys"
+):
"""Configure multiple sources from charm configuration.
The lists are encoded as yaml fragments in the configuration.
@@ -128,8 +127,8 @@ def configure_sources(update=False,
Note that 'null' (a.k.a. None) should not be quoted.
"""
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
+ sources = safe_load((config(sources_var) or "").strip()) or []
+ keys = safe_load((config(keys_var) or "").strip()) or None
if isinstance(sources, six.string_types):
sources = [sources]
@@ -143,7 +142,8 @@ def configure_sources(update=False,
if len(sources) != len(keys):
raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
+ "Install sources and keys lists are different lengths"
+ )
for source, key in zip(sources, keys):
add_source(source, key)
if update:
@@ -178,8 +178,7 @@ def install_remote(source, *args, **kwargs):
try:
return handler.install(source, *args, **kwargs)
except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
+ log("Install source attempt unsuccessful: {}".format(e), level="WARNING")
raise UnhandledSource("No handler found for source {}".format(source))
@@ -195,15 +194,12 @@ def plugins(fetch_handlers=None):
fetch_handlers = FETCH_HANDLERS
plugin_list = []
for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
+ package, classname = handler_name.rsplit(".", 1)
try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
+ handler_class = getattr(importlib.import_module(package), classname)
plugin_list.append(handler_class())
except NotImplementedError:
# Skip missing plugins so that they can be omitted from
# installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
+ log("FetchHandler {} not found, skipping plugin".format(handler_name))
return plugin_list
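Two common entry points into this module, assuming the Ubuntu platform (the PPA name is a placeholder):

    from charmhelpers.fetch import add_source, configure_sources

    # Explicit source:
    add_source("ppa:example/ppa")

    # Or drive sources/keys from the charm's install_sources and
    # install_keys config options, then refresh the package index:
    configure_sources(update=True)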
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py
index d25587a..b8b9bd9 100644
--- a/hooks/charmhelpers/fetch/archiveurl.py
+++ b/hooks/charmhelpers/fetch/archiveurl.py
@@ -16,37 +16,39 @@ import os
import hashlib
import re
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
+from charmhelpers.fetch import BaseFetchHandler, UnhandledSource
+from charmhelpers.payload.archive import get_archive_handler, extract
from charmhelpers.core.host import mkdir, check_hash
import six
+
if six.PY3:
from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+ build_opener,
+ install_opener,
+ urlopen,
+ urlretrieve,
+ HTTPPasswordMgrWithDefaultRealm,
+ HTTPBasicAuthHandler,
)
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.error import URLError
else:
from urllib import urlretrieve
from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
+ build_opener,
+ install_opener,
+ urlopen,
+ HTTPPasswordMgrWithDefaultRealm,
+ HTTPBasicAuthHandler,
+ URLError,
)
from urlparse import urlparse, urlunparse, parse_qs
def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
+ """urllib.splituser(), but six's support of this seems broken"""
+ _userprog = re.compile("^(.*)@(.*)$")
match = _userprog.match(host)
if match:
return match.group(1, 2)
@@ -54,8 +56,8 @@ def splituser(host):
def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
+ """urllib.splitpasswd(), but six's support of this is missing"""
+ _passwdprog = re.compile("^([^:]*):(.*)$", re.S)
match = _passwdprog.match(user)
if match:
return match.group(1, 2)
@@ -72,9 +74,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
Installs the contents of the archive in $CHARM_DIR/fetched/.
"""
+
def can_handle(self, source):
url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
+ if url_parts.scheme not in ("http", "https", "ftp", "file"):
# XXX: Why is this returning a boolean and a string? It's
# doomed to fail since "bool(can_handle('foo://'))" will be True.
return "Wrong source type"
@@ -92,7 +95,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
# propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
+ if proto in ("http", "https"):
auth, barehost = splituser(netloc)
if auth is not None:
source = urlunparse((proto, barehost, path, params, query, fragment))
@@ -106,7 +109,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
install_opener(opener)
response = urlopen(source)
try:
- with open(dest, 'wb') as dest_file:
+ with open(dest, "wb") as dest_file:
dest_file.write(response.read())
except Exception as e:
if os.path.isfile(dest):
@@ -119,7 +122,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
check_hash(tempfile, hashsum, validate)
return tempfile
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
+ def install(self, source, dest=None, checksum=None, hash_type="sha1"):
"""
Download and install an archive file, with optional checksum validation.
@@ -138,7 +141,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
"""
url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
+ dest_dir = os.path.join(os.environ.get("CHARM_DIR"), "fetched")
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
@@ -156,8 +159,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
algorithms = hashlib.algorithms_available
if key in algorithms:
if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
+ raise TypeError("Expected 1 hash value, not %d" % len(value))
expected = value[0]
check_hash(dld_file, expected, key)
if checksum:
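Sketch of the archive handler in use; the URL and checksum are placeholders:

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()
    # Downloads to $CHARM_DIR/fetched/, validates the digest, then
    # extracts the archive and returns the extraction path.
    path = handler.install(
        "https://example.com/plugins.tar.gz",
        checksum="da39a3ee5e6b4b0d3255bfef95601890afd80709",  # hypothetical sha1
        hash_type="sha1",
    )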
diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py
index c4ab3ff..2ff0397 100644
--- a/hooks/charmhelpers/fetch/bzrurl.py
+++ b/hooks/charmhelpers/fetch/bzrurl.py
@@ -23,10 +23,10 @@ from charmhelpers.fetch import (
from charmhelpers.core.host import mkdir
-if filter_installed_packages(['bzr']) != []:
- install(['bzr'])
- if filter_installed_packages(['bzr']) != []:
- raise NotImplementedError('Unable to install bzr')
+if filter_installed_packages(["bzr"]) != []:
+ install(["bzr"])
+ if filter_installed_packages(["bzr"]) != []:
+ raise NotImplementedError("Unable to install bzr")
class BzrUrlFetchHandler(BaseFetchHandler):
@@ -34,10 +34,10 @@ class BzrUrlFetchHandler(BaseFetchHandler):
def can_handle(self, source):
url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
+ if url_parts.scheme not in ("bzr+ssh", "lp", ""):
return False
elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.bzr'))
+ return os.path.exists(os.path.join(source, ".bzr"))
else:
return True
@@ -46,13 +46,13 @@ class BzrUrlFetchHandler(BaseFetchHandler):
raise UnhandledSource("Cannot handle {}".format(source))
cmd_opts = []
if revno:
- cmd_opts += ['-r', str(revno)]
+ cmd_opts += ["-r", str(revno)]
if os.path.exists(dest):
- cmd = ['bzr', 'pull']
+ cmd = ["bzr", "pull"]
cmd += cmd_opts
- cmd += ['--overwrite', '-d', dest, source]
+ cmd += ["--overwrite", "-d", dest, source]
else:
- cmd = ['bzr', 'branch']
+ cmd = ["bzr", "branch"]
cmd += cmd_opts
cmd += [source, dest]
check_output(cmd, stderr=STDOUT)
@@ -63,8 +63,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
+ dest_dir = os.path.join(os.environ.get("CHARM_DIR"), "fetched", branch_name)
if dest and not os.path.exists(dest):
mkdir(dest, perms=0o755)
diff --git a/hooks/charmhelpers/fetch/centos.py b/hooks/charmhelpers/fetch/centos.py
index a91dcff..30698a5 100644
--- a/hooks/charmhelpers/fetch/centos.py
+++ b/hooks/charmhelpers/fetch/centos.py
@@ -30,7 +30,7 @@ def filter_installed_packages(packages):
"""Return a list of packages that require installation."""
yb = yum.YumBase()
package_list = yb.doPackageLists()
- temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
+ temp_cache = {p.base_package_name: 1 for p in package_list["installed"]}
_pkgs = [p for p in packages if not temp_cache.get(p, False)]
return _pkgs
@@ -38,39 +38,38 @@ def filter_installed_packages(packages):
def install(packages, options=None, fatal=False):
"""Install one or more packages."""
- cmd = ['yum', '--assumeyes']
+ cmd = ["yum", "--assumeyes"]
if options is not None:
cmd.extend(options)
- cmd.append('install')
+ cmd.append("install")
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
+ log("Installing {} with options: {}".format(packages, options))
_run_yum_command(cmd, fatal)
def upgrade(options=None, fatal=False, dist=False):
"""Upgrade all packages."""
- cmd = ['yum', '--assumeyes']
+ cmd = ["yum", "--assumeyes"]
if options is not None:
cmd.extend(options)
- cmd.append('upgrade')
+ cmd.append("upgrade")
log("Upgrading with options: {}".format(options))
_run_yum_command(cmd, fatal)
def update(fatal=False):
"""Update local yum cache."""
- cmd = ['yum', '--assumeyes', 'update']
+ cmd = ["yum", "--assumeyes", "update"]
log("Update with fatal: {}".format(fatal))
_run_yum_command(cmd, fatal)
def purge(packages, fatal=False):
"""Purge one or more packages."""
- cmd = ['yum', '--assumeyes', 'remove']
+ cmd = ["yum", "--assumeyes", "remove"]
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
@@ -82,7 +81,7 @@ def purge(packages, fatal=False):
def yum_search(packages):
"""Search for a package."""
output = {}
- cmd = ['yum', 'search']
+ cmd = ["yum", "search"]
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
@@ -107,34 +106,34 @@ def add_source(source, key=None):
placing your Juju environment at risk.
"""
if source is None:
- log('Source is not present. Skipping')
+ log("Source is not present. Skipping")
return
- if source.startswith('http'):
- directory = '/etc/yum.repos.d/'
+ if source.startswith("http"):
+ directory = "/etc/yum.repos.d/"
for filename in os.listdir(directory):
- with open(directory + filename, 'r') as rpm_file:
+ with open(directory + filename, "r") as rpm_file:
if source in rpm_file.read():
break
else:
log("Add source: {!r}".format(source))
# write in the charms.repo
- with open(directory + 'Charms.repo', 'a') as rpm_file:
- rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
- rpm_file.write('name=%s\n' % source[7:])
- rpm_file.write('baseurl=%s\n\n' % source)
+ with open(directory + "Charms.repo", "a") as rpm_file:
+ rpm_file.write("[%s]\n" % source[7:].replace("/", "_"))
+ rpm_file.write("name=%s\n" % source[7:])
+ rpm_file.write("baseurl=%s\n\n" % source)
else:
log("Unknown source: {!r}".format(source))
if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
+ if "-----BEGIN PGP PUBLIC KEY BLOCK-----" in key:
+ with NamedTemporaryFile("w+") as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
- subprocess.check_call(['rpm', '--import', key_file.name])
+ subprocess.check_call(["rpm", "--import", key_file.name])
else:
- subprocess.check_call(['rpm', '--import', key])
+ subprocess.check_call(["rpm", "--import", key])
def _run_yum_command(cmd, fatal=False):
@@ -163,8 +162,10 @@ def _run_yum_command(cmd, fatal=False):
if retry_count > YUM_NO_LOCK_RETRY_COUNT:
raise
result = e.returncode
- log("Couldn't acquire YUM lock. Will retry in {} seconds."
- "".format(YUM_NO_LOCK_RETRY_DELAY))
+ log(
+ "Couldn't acquire YUM lock. Will retry in {} seconds."
+ "".format(YUM_NO_LOCK_RETRY_DELAY)
+ )
time.sleep(YUM_NO_LOCK_RETRY_DELAY)
else:
diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py
index 070ca9b..8ad2f34 100644
--- a/hooks/charmhelpers/fetch/giturl.py
+++ b/hooks/charmhelpers/fetch/giturl.py
@@ -21,10 +21,10 @@ from charmhelpers.fetch import (
install,
)
-if filter_installed_packages(['git']) != []:
- install(['git'])
- if filter_installed_packages(['git']) != []:
- raise NotImplementedError('Unable to install git')
+if filter_installed_packages(["git"]) != []:
+ install(["git"])
+ if filter_installed_packages(["git"]) != []:
+ raise NotImplementedError("Unable to install git")
class GitUrlFetchHandler(BaseFetchHandler):
@@ -33,10 +33,10 @@ class GitUrlFetchHandler(BaseFetchHandler):
def can_handle(self, source):
url_parts = self.parse_url(source)
# TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git', ''):
+ if url_parts.scheme not in ("http", "https", "git", ""):
return False
elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.git'))
+ return os.path.exists(os.path.join(source, ".git"))
else:
return True
@@ -45,11 +45,11 @@ class GitUrlFetchHandler(BaseFetchHandler):
raise UnhandledSource("Cannot handle {}".format(source))
if os.path.exists(dest):
- cmd = ['git', '-C', dest, 'pull', source, branch]
+ cmd = ["git", "-C", dest, "pull", source, branch]
else:
- cmd = ['git', 'clone', source, dest, '--branch', branch]
+ cmd = ["git", "clone", source, dest, "--branch", branch]
if depth:
- cmd.extend(['--depth', depth])
+ cmd.extend(["--depth", depth])
check_output(cmd, stderr=STDOUT)
def install(self, source, branch="master", dest=None, depth=None):
@@ -58,8 +58,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
+ dest_dir = os.path.join(os.environ.get("CHARM_DIR"), "fetched", branch_name)
try:
self.clone(source, dest_dir, branch, depth)
except CalledProcessError as e:
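And the git equivalent; note that depth goes straight into the argv list, so pass it as a string (the repository URL is a placeholder):

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    # Clones (or pulls, if the target exists) into
    # $CHARM_DIR/fetched/<branch> and returns that directory.
    checkout = handler.install(
        "https://example.com/example.git",
        branch="master",
        depth="1",
    )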
diff --git a/hooks/charmhelpers/fetch/python/debug.py b/hooks/charmhelpers/fetch/python/debug.py
index 757135e..fdfc009 100644
--- a/hooks/charmhelpers/fetch/python/debug.py
+++ b/hooks/charmhelpers/fetch/python/debug.py
@@ -21,12 +21,7 @@ import atexit
import sys
from charmhelpers.fetch.python.rpdb import Rpdb
-from charmhelpers.core.hookenv import (
- open_port,
- close_port,
- ERROR,
- log
-)
+from charmhelpers.core.hookenv import open_port, close_port, ERROR, log
__author__ = "Jorge Niedbalski <jorge.niedbalski@xxxxxxxxxxxxx>"
@@ -44,11 +39,9 @@ def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
"""
atexit.register(close_port, port)
try:
- log("Starting a remote python debugger session on %s:%s" % (addr,
- port))
+ log("Starting a remote python debugger session on %s:%s" % (addr, port))
open_port(port)
debugger = Rpdb(addr=addr, port=port)
debugger.set_trace(sys._getframe().f_back)
except Exception:
- _error("Cannot start a remote debug session on %s:%s" % (addr,
- port))
+ _error("Cannot start a remote debug session on %s:%s" % (addr, port))
diff --git a/hooks/charmhelpers/fetch/python/packages.py b/hooks/charmhelpers/fetch/python/packages.py
index 6e95028..ab00184 100644
--- a/hooks/charmhelpers/fetch/python/packages.py
+++ b/hooks/charmhelpers/fetch/python/packages.py
@@ -41,9 +41,9 @@ def pip_execute(*args, **kwargs):
except ImportError:
apt_update()
if six.PY2:
- apt_install('python-pip')
+ apt_install("python-pip")
else:
- apt_install('python3-pip')
+ apt_install("python3-pip")
from pip import main as _pip_execute
_pip_execute(*args, **kwargs)
finally:
@@ -67,47 +67,48 @@ def pip_install_requirements(requirements, constraints=None, **options):
"""
command = ["install"]
- available_options = ('proxy', 'src', 'log', )
+ available_options = ("proxy", "src", "log")
for option in parse_options(options, available_options):
command.append(option)
command.append("-r {0}".format(requirements))
if constraints:
command.append("-c {0}".format(constraints))
- log("Installing from file: {} with constraints {} "
- "and options: {}".format(requirements, constraints, command))
+ log(
+ "Installing from file: {} with constraints {} "
+ "and options: {}".format(requirements, constraints, command)
+ )
else:
- log("Installing from file: {} with options: {}".format(requirements,
- command))
+ log("Installing from file: {} with options: {}".format(requirements, command))
pip_execute(command)
-def pip_install(package, fatal=False, upgrade=False, venv=None,
- constraints=None, **options):
+def pip_install(
+ package, fatal=False, upgrade=False, venv=None, constraints=None, **options
+):
"""Install a python package"""
if venv:
- venv_python = os.path.join(venv, 'bin/pip')
+ venv_python = os.path.join(venv, "bin/pip")
command = [venv_python, "install"]
else:
command = ["install"]
- available_options = ('proxy', 'src', 'log', 'index-url', )
+ available_options = ("proxy", "src", "log", "index-url")
for option in parse_options(options, available_options):
command.append(option)
if upgrade:
- command.append('--upgrade')
+ command.append("--upgrade")
if constraints:
- command.extend(['-c', constraints])
+ command.extend(["-c", constraints])
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
- log("Installing {} package with options: {}".format(package,
- command))
+ log("Installing {} package with options: {}".format(package, command))
if venv:
subprocess.check_call(command)
else:
@@ -118,7 +119,7 @@ def pip_uninstall(package, **options):
"""Uninstall a python package"""
command = ["uninstall", "-q", "-y"]
- available_options = ('proxy', 'log', )
+ available_options = ("proxy", "log")
for option in parse_options(options, available_options):
command.append(option)
@@ -127,8 +128,7 @@ def pip_uninstall(package, **options):
else:
command.append(package)
- log("Uninstalling {} package with options: {}".format(package,
- command))
+ log("Uninstalling {} package with options: {}".format(package, command))
pip_execute(command)
@@ -141,14 +141,14 @@ def pip_list():
def pip_create_virtualenv(path=None):
"""Create an isolated Python environment."""
if six.PY2:
- apt_install('python-virtualenv')
+ apt_install("python-virtualenv")
else:
- apt_install('python3-virtualenv')
+ apt_install("python3-virtualenv")
if path:
venv_path = path
else:
- venv_path = os.path.join(charm_dir(), 'venv')
+ venv_path = os.path.join(charm_dir(), "venv")
if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
+ subprocess.check_call(["virtualenv", venv_path])
diff --git a/hooks/charmhelpers/fetch/python/rpdb.py b/hooks/charmhelpers/fetch/python/rpdb.py
index 9b31610..c69523b 100644
--- a/hooks/charmhelpers/fetch/python/rpdb.py
+++ b/hooks/charmhelpers/fetch/python/rpdb.py
@@ -23,7 +23,6 @@ __version__ = "0.1.3"
class Rpdb(pdb.Pdb):
-
def __init__(self, addr="127.0.0.1", port=4444):
"""Initialize the socket and initialize pdb."""
@@ -37,8 +36,8 @@ class Rpdb(pdb.Pdb):
self.skt.bind((addr, port))
self.skt.listen(1)
(clientsocket, address) = self.skt.accept()
- handle = clientsocket.makefile('rw')
- pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
+ handle = clientsocket.makefile("rw")
+ pdb.Pdb.__init__(self, completekey="tab", stdin=handle, stdout=handle)
sys.stdout = sys.stdin = handle
def shutdown(self):
diff --git a/hooks/charmhelpers/fetch/python/version.py b/hooks/charmhelpers/fetch/python/version.py
index 3eb4210..69df22d 100644
--- a/hooks/charmhelpers/fetch/python/version.py
+++ b/hooks/charmhelpers/fetch/python/version.py
@@ -27,6 +27,6 @@ def current_version():
def current_version_string():
"""Current system python version as string major.minor.micro"""
- return "{0}.{1}.{2}".format(sys.version_info.major,
- sys.version_info.minor,
- sys.version_info.micro)
+ return "{0}.{1}.{2}".format(
+ sys.version_info.major, sys.version_info.minor, sys.version_info.micro
+ )
diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py
index fc70aa9..d446cbc 100644
--- a/hooks/charmhelpers/fetch/snap.py
+++ b/hooks/charmhelpers/fetch/snap.py
@@ -22,19 +22,14 @@ import os
from time import sleep
from charmhelpers.core.hookenv import log
-__author__ = 'Joseph Borg <joseph.borg@xxxxxxxxxxxxx>'
+__author__ = "Joseph Borg <joseph.borg@xxxxxxxxxxxxx>"
# The return code for "couldn't acquire lock" in Snap
# (hopefully this will be improved).
SNAP_NO_LOCK = 1
SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks.
SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-SNAP_CHANNELS = [
- 'edge',
- 'beta',
- 'candidate',
- 'stable',
-]
+SNAP_CHANNELS = ["edge", "beta", "candidate", "stable"]
class CouldNotAcquireLockException(Exception):
@@ -59,17 +54,22 @@ def _snap_exec(commands):
while return_code is None or return_code == SNAP_NO_LOCK:
try:
- return_code = subprocess.check_call(['snap'] + commands,
- env=os.environ)
+ return_code = subprocess.check_call(["snap"] + commands, env=os.environ)
except subprocess.CalledProcessError as e:
- retry_count += + 1
+ retry_count += 1
if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
raise CouldNotAcquireLockException(
- 'Could not aquire lock after {} attempts'
- .format(SNAP_NO_LOCK_RETRY_COUNT))
+ "Could not aquire lock after {} attempts".format(
+ SNAP_NO_LOCK_RETRY_COUNT
+ )
+ )
return_code = e.returncode
- log('Snap failed to acquire lock, trying again in {} seconds.'
- .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
+ log(
+ "Snap failed to acquire lock, trying again in {} seconds.".format(
+ SNAP_NO_LOCK_RETRY_DELAY
+ ),
+ level="WARN",
+ )
sleep(SNAP_NO_LOCK_RETRY_DELAY)
return return_code
@@ -88,12 +88,12 @@ def snap_install(packages, *flags):
flags = list(flags)
- message = 'Installing snap(s) "%s"' % ', '.join(packages)
+ message = 'Installing snap(s) "%s"' % ", ".join(packages)
if flags:
- message += ' with option(s) "%s"' % ', '.join(flags)
+ message += ' with option(s) "%s"' % ", ".join(flags)
- log(message, level='INFO')
- return _snap_exec(['install'] + flags + packages)
+ log(message, level="INFO")
+ return _snap_exec(["install"] + flags + packages)
def snap_remove(packages, *flags):
@@ -109,12 +109,12 @@ def snap_remove(packages, *flags):
flags = list(flags)
- message = 'Removing snap(s) "%s"' % ', '.join(packages)
+ message = 'Removing snap(s) "%s"' % ", ".join(packages)
if flags:
- message += ' with options "%s"' % ', '.join(flags)
+ message += ' with options "%s"' % ", ".join(flags)
- log(message, level='INFO')
- return _snap_exec(['remove'] + flags + packages)
+ log(message, level="INFO")
+ return _snap_exec(["remove"] + flags + packages)
def snap_refresh(packages, *flags):
@@ -130,12 +130,12 @@ def snap_refresh(packages, *flags):
flags = list(flags)
- message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
+ message = 'Refreshing snap(s) "%s"' % ", ".join(packages)
if flags:
- message += ' with options "%s"' % ', '.join(flags)
+ message += ' with options "%s"' % ", ".join(flags)
- log(message, level='INFO')
- return _snap_exec(['refresh'] + flags + packages)
+ log(message, level="INFO")
+ return _snap_exec(["refresh"] + flags + packages)
def valid_snap_channel(channel):
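Usage sketch; the snap names are illustrative, and flags pass through to the snap CLI verbatim:

    from charmhelpers.fetch.snap import snap_install, valid_snap_channel

    channel = "candidate"
    if valid_snap_channel(channel):  # raises on an unrecognised channel
        snap_install(["core", "my-snap"], "--channel={}".format(channel))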
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
index 3ddaf0d..e414018 100644
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -22,174 +22,171 @@ import time
from charmhelpers.core.host import get_distrib_codename, get_system_env
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- WARNING,
- env_proxy_settings,
-)
+from charmhelpers.core.hookenv import log, DEBUG, WARNING, env_proxy_settings
from charmhelpers.fetch import SourceConfigError, GPGKeyError
from charmhelpers.fetch import ubuntu_apt_pkg
PROPOSED_POCKET = (
"# Proposed\n"
"deb http://archive.ubuntu.com/ubuntu {}-proposed main universe "
- "multiverse restricted\n")
+ "multiverse restricted\n"
+)
PROPOSED_PORTS_POCKET = (
"# Proposed\n"
"deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe "
- "multiverse restricted\n")
+ "multiverse restricted\n"
+)
# Only supports 64bit and ppc64 at the moment.
ARCH_TO_PROPOSED_POCKET = {
- 'x86_64': PROPOSED_POCKET,
- 'ppc64le': PROPOSED_PORTS_POCKET,
- 'aarch64': PROPOSED_PORTS_POCKET,
- 's390x': PROPOSED_PORTS_POCKET,
+ "x86_64": PROPOSED_POCKET,
+ "ppc64le": PROPOSED_PORTS_POCKET,
+ "aarch64": PROPOSED_PORTS_POCKET,
+ "s390x": PROPOSED_PORTS_POCKET,
}
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
+CLOUD_ARCHIVE_KEY_ID = "5EDB1B62EC4926EA"
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
CLOUD_ARCHIVE_POCKETS = {
# Folsom
- 'folsom': 'precise-updates/folsom',
- 'folsom/updates': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
+ "folsom": "precise-updates/folsom",
+ "folsom/updates": "precise-updates/folsom",
+ "precise-folsom": "precise-updates/folsom",
+ "precise-folsom/updates": "precise-updates/folsom",
+ "precise-updates/folsom": "precise-updates/folsom",
+ "folsom/proposed": "precise-proposed/folsom",
+ "precise-folsom/proposed": "precise-proposed/folsom",
+ "precise-proposed/folsom": "precise-proposed/folsom",
# Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'grizzly/updates': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
+ "grizzly": "precise-updates/grizzly",
+ "grizzly/updates": "precise-updates/grizzly",
+ "precise-grizzly": "precise-updates/grizzly",
+ "precise-grizzly/updates": "precise-updates/grizzly",
+ "precise-updates/grizzly": "precise-updates/grizzly",
+ "grizzly/proposed": "precise-proposed/grizzly",
+ "precise-grizzly/proposed": "precise-proposed/grizzly",
+ "precise-proposed/grizzly": "precise-proposed/grizzly",
# Havana
- 'havana': 'precise-updates/havana',
- 'havana/updates': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
+ "havana": "precise-updates/havana",
+ "havana/updates": "precise-updates/havana",
+ "precise-havana": "precise-updates/havana",
+ "precise-havana/updates": "precise-updates/havana",
+ "precise-updates/havana": "precise-updates/havana",
+ "havana/proposed": "precise-proposed/havana",
+ "precise-havana/proposed": "precise-proposed/havana",
+ "precise-proposed/havana": "precise-proposed/havana",
# Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'icehouse/updates': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
+ "icehouse": "precise-updates/icehouse",
+ "icehouse/updates": "precise-updates/icehouse",
+ "precise-icehouse": "precise-updates/icehouse",
+ "precise-icehouse/updates": "precise-updates/icehouse",
+ "precise-updates/icehouse": "precise-updates/icehouse",
+ "icehouse/proposed": "precise-proposed/icehouse",
+ "precise-icehouse/proposed": "precise-proposed/icehouse",
+ "precise-proposed/icehouse": "precise-proposed/icehouse",
# Juno
- 'juno': 'trusty-updates/juno',
- 'juno/updates': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
+ "juno": "trusty-updates/juno",
+ "juno/updates": "trusty-updates/juno",
+ "trusty-juno": "trusty-updates/juno",
+ "trusty-juno/updates": "trusty-updates/juno",
+ "trusty-updates/juno": "trusty-updates/juno",
+ "juno/proposed": "trusty-proposed/juno",
+ "trusty-juno/proposed": "trusty-proposed/juno",
+ "trusty-proposed/juno": "trusty-proposed/juno",
# Kilo
- 'kilo': 'trusty-updates/kilo',
- 'kilo/updates': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
+ "kilo": "trusty-updates/kilo",
+ "kilo/updates": "trusty-updates/kilo",
+ "trusty-kilo": "trusty-updates/kilo",
+ "trusty-kilo/updates": "trusty-updates/kilo",
+ "trusty-updates/kilo": "trusty-updates/kilo",
+ "kilo/proposed": "trusty-proposed/kilo",
+ "trusty-kilo/proposed": "trusty-proposed/kilo",
+ "trusty-proposed/kilo": "trusty-proposed/kilo",
# Liberty
- 'liberty': 'trusty-updates/liberty',
- 'liberty/updates': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
+ "liberty": "trusty-updates/liberty",
+ "liberty/updates": "trusty-updates/liberty",
+ "trusty-liberty": "trusty-updates/liberty",
+ "trusty-liberty/updates": "trusty-updates/liberty",
+ "trusty-updates/liberty": "trusty-updates/liberty",
+ "liberty/proposed": "trusty-proposed/liberty",
+ "trusty-liberty/proposed": "trusty-proposed/liberty",
+ "trusty-proposed/liberty": "trusty-proposed/liberty",
# Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
+ "mitaka": "trusty-updates/mitaka",
+ "mitaka/updates": "trusty-updates/mitaka",
+ "trusty-mitaka": "trusty-updates/mitaka",
+ "trusty-mitaka/updates": "trusty-updates/mitaka",
+ "trusty-updates/mitaka": "trusty-updates/mitaka",
+ "mitaka/proposed": "trusty-proposed/mitaka",
+ "trusty-mitaka/proposed": "trusty-proposed/mitaka",
+ "trusty-proposed/mitaka": "trusty-proposed/mitaka",
# Newton
- 'newton': 'xenial-updates/newton',
- 'newton/updates': 'xenial-updates/newton',
- 'xenial-newton': 'xenial-updates/newton',
- 'xenial-newton/updates': 'xenial-updates/newton',
- 'xenial-updates/newton': 'xenial-updates/newton',
- 'newton/proposed': 'xenial-proposed/newton',
- 'xenial-newton/proposed': 'xenial-proposed/newton',
- 'xenial-proposed/newton': 'xenial-proposed/newton',
+ "newton": "xenial-updates/newton",
+ "newton/updates": "xenial-updates/newton",
+ "xenial-newton": "xenial-updates/newton",
+ "xenial-newton/updates": "xenial-updates/newton",
+ "xenial-updates/newton": "xenial-updates/newton",
+ "newton/proposed": "xenial-proposed/newton",
+ "xenial-newton/proposed": "xenial-proposed/newton",
+ "xenial-proposed/newton": "xenial-proposed/newton",
# Ocata
- 'ocata': 'xenial-updates/ocata',
- 'ocata/updates': 'xenial-updates/ocata',
- 'xenial-ocata': 'xenial-updates/ocata',
- 'xenial-ocata/updates': 'xenial-updates/ocata',
- 'xenial-updates/ocata': 'xenial-updates/ocata',
- 'ocata/proposed': 'xenial-proposed/ocata',
- 'xenial-ocata/proposed': 'xenial-proposed/ocata',
- 'xenial-proposed/ocata': 'xenial-proposed/ocata',
+ "ocata": "xenial-updates/ocata",
+ "ocata/updates": "xenial-updates/ocata",
+ "xenial-ocata": "xenial-updates/ocata",
+ "xenial-ocata/updates": "xenial-updates/ocata",
+ "xenial-updates/ocata": "xenial-updates/ocata",
+ "ocata/proposed": "xenial-proposed/ocata",
+ "xenial-ocata/proposed": "xenial-proposed/ocata",
+ "xenial-proposed/ocata": "xenial-proposed/ocata",
# Pike
- 'pike': 'xenial-updates/pike',
- 'xenial-pike': 'xenial-updates/pike',
- 'xenial-pike/updates': 'xenial-updates/pike',
- 'xenial-updates/pike': 'xenial-updates/pike',
- 'pike/proposed': 'xenial-proposed/pike',
- 'xenial-pike/proposed': 'xenial-proposed/pike',
- 'xenial-proposed/pike': 'xenial-proposed/pike',
+ "pike": "xenial-updates/pike",
+ "xenial-pike": "xenial-updates/pike",
+ "xenial-pike/updates": "xenial-updates/pike",
+ "xenial-updates/pike": "xenial-updates/pike",
+ "pike/proposed": "xenial-proposed/pike",
+ "xenial-pike/proposed": "xenial-proposed/pike",
+ "xenial-proposed/pike": "xenial-proposed/pike",
# Queens
- 'queens': 'xenial-updates/queens',
- 'xenial-queens': 'xenial-updates/queens',
- 'xenial-queens/updates': 'xenial-updates/queens',
- 'xenial-updates/queens': 'xenial-updates/queens',
- 'queens/proposed': 'xenial-proposed/queens',
- 'xenial-queens/proposed': 'xenial-proposed/queens',
- 'xenial-proposed/queens': 'xenial-proposed/queens',
+ "queens": "xenial-updates/queens",
+ "xenial-queens": "xenial-updates/queens",
+ "xenial-queens/updates": "xenial-updates/queens",
+ "xenial-updates/queens": "xenial-updates/queens",
+ "queens/proposed": "xenial-proposed/queens",
+ "xenial-queens/proposed": "xenial-proposed/queens",
+ "xenial-proposed/queens": "xenial-proposed/queens",
# Rocky
- 'rocky': 'bionic-updates/rocky',
- 'bionic-rocky': 'bionic-updates/rocky',
- 'bionic-rocky/updates': 'bionic-updates/rocky',
- 'bionic-updates/rocky': 'bionic-updates/rocky',
- 'rocky/proposed': 'bionic-proposed/rocky',
- 'bionic-rocky/proposed': 'bionic-proposed/rocky',
- 'bionic-proposed/rocky': 'bionic-proposed/rocky',
+ "rocky": "bionic-updates/rocky",
+ "bionic-rocky": "bionic-updates/rocky",
+ "bionic-rocky/updates": "bionic-updates/rocky",
+ "bionic-updates/rocky": "bionic-updates/rocky",
+ "rocky/proposed": "bionic-proposed/rocky",
+ "bionic-rocky/proposed": "bionic-proposed/rocky",
+ "bionic-proposed/rocky": "bionic-proposed/rocky",
# Stein
- 'stein': 'bionic-updates/stein',
- 'bionic-stein': 'bionic-updates/stein',
- 'bionic-stein/updates': 'bionic-updates/stein',
- 'bionic-updates/stein': 'bionic-updates/stein',
- 'stein/proposed': 'bionic-proposed/stein',
- 'bionic-stein/proposed': 'bionic-proposed/stein',
- 'bionic-proposed/stein': 'bionic-proposed/stein',
+ "stein": "bionic-updates/stein",
+ "bionic-stein": "bionic-updates/stein",
+ "bionic-stein/updates": "bionic-updates/stein",
+ "bionic-updates/stein": "bionic-updates/stein",
+ "stein/proposed": "bionic-proposed/stein",
+ "bionic-stein/proposed": "bionic-proposed/stein",
+ "bionic-proposed/stein": "bionic-proposed/stein",
# Train
- 'train': 'bionic-updates/train',
- 'bionic-train': 'bionic-updates/train',
- 'bionic-train/updates': 'bionic-updates/train',
- 'bionic-updates/train': 'bionic-updates/train',
- 'train/proposed': 'bionic-proposed/train',
- 'bionic-train/proposed': 'bionic-proposed/train',
- 'bionic-proposed/train': 'bionic-proposed/train',
+ "train": "bionic-updates/train",
+ "bionic-train": "bionic-updates/train",
+ "bionic-train/updates": "bionic-updates/train",
+ "bionic-updates/train": "bionic-updates/train",
+ "train/proposed": "bionic-proposed/train",
+ "bionic-train/proposed": "bionic-proposed/train",
+ "bionic-proposed/train": "bionic-proposed/train",
# Ussuri
- 'ussuri': 'bionic-updates/ussuri',
- 'bionic-ussuri': 'bionic-updates/ussuri',
- 'bionic-ussuri/updates': 'bionic-updates/ussuri',
- 'bionic-updates/ussuri': 'bionic-updates/ussuri',
- 'ussuri/proposed': 'bionic-proposed/ussuri',
- 'bionic-ussuri/proposed': 'bionic-proposed/ussuri',
- 'bionic-proposed/ussuri': 'bionic-proposed/ussuri',
+ "ussuri": "bionic-updates/ussuri",
+ "bionic-ussuri": "bionic-updates/ussuri",
+ "bionic-ussuri/updates": "bionic-updates/ussuri",
+ "bionic-updates/ussuri": "bionic-updates/ussuri",
+ "ussuri/proposed": "bionic-proposed/ussuri",
+ "bionic-ussuri/proposed": "bionic-proposed/ussuri",
+ "bionic-proposed/ussuri": "bionic-proposed/ussuri",
}
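For reference, every alias in this table resolves to a single canonical cloud-archive pocket, so consumers only ever do a plain dict lookup; a quick sketch of what the map gives them:

    >>> CLOUD_ARCHIVE_POCKETS["stein"]
    'bionic-updates/stein'
    >>> CLOUD_ARCHIVE_POCKETS["bionic-stein/proposed"]
    'bionic-proposed/stein'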
@@ -207,8 +204,10 @@ def filter_installed_packages(packages):
p = cache[package]
p.current_ver or _pkgs.append(package)
except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
+ log(
+ "Package {} has no installation candidate.".format(package),
+ level="WARNING",
+ )
_pkgs.append(package)
return _pkgs
@@ -219,10 +218,7 @@ def filter_missing_packages(packages):
:param packages: list of packages to evaluate.
:returns list: Packages that are installed.
"""
- return list(
- set(packages) -
- set(filter_installed_packages(packages))
- )
+ return list(set(packages) - set(filter_installed_packages(packages)))
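Worth noting while reviewing: despite its name, filter_missing_packages returns the packages that are already installed, because filter_installed_packages returns the subset that still needs installing. A doctest-style sketch, assuming nagios3 is installed on the unit and not-a-package is not in the apt cache:

    >>> filter_installed_packages(["nagios3", "not-a-package"])
    ['not-a-package']
    >>> filter_missing_packages(["nagios3", "not-a-package"])
    ['nagios3']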
def apt_cache(*_, **__):
@@ -235,7 +231,7 @@ def apt_cache(*_, **__):
:returns:Object used to interrogate the system apt and dpkg databases.
:rtype:ubuntu_apt_pkg.Cache
"""
- if 'apt_pkg' in sys.modules:
+ if "apt_pkg" in sys.modules:
# NOTE(fnordahl): When our consumers use the upstream ``apt_pkg`` module

# in conjunction with the apt_cache helper function, they may expect us
# to call ``apt_pkg.init()`` for them.
@@ -243,9 +239,12 @@ def apt_cache(*_, **__):
# Detect this situation, log a warning and make the call to
# ``apt_pkg.init()`` to avoid the consumer Python interpreter from
# crashing with a segmentation fault.
- log('Support for use of upstream ``apt_pkg`` module in conjunction'
- 'with charm-helpers is deprecated since 2019-06-25', level=WARNING)
- sys.modules['apt_pkg'].init()
+ log(
+ "Support for use of upstream ``apt_pkg`` module in conjunction"
+ "with charm-helpers is deprecated since 2019-06-25",
+ level=WARNING,
+ )
+ sys.modules["apt_pkg"].init()
return ubuntu_apt_pkg.Cache()
@@ -262,17 +261,16 @@ def apt_install(packages, options=None, fatal=False):
:raises: subprocess.CalledProcessError
"""
if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
+ options = ["--option=Dpkg::Options::=--force-confold"]
- cmd = ['apt-get', '--assume-yes']
+ cmd = ["apt-get", "--assume-yes"]
cmd.extend(options)
- cmd.append('install')
+ cmd.append("install")
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
+ log("Installing {} with options: {}".format(packages, options))
_run_apt_command(cmd, fatal)
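The command is assembled as a plain list before being handed to _run_apt_command; with the default options, a call such as the (hypothetical) one below ends up running the following:

    # apt_install(["nagios3"], fatal=True) executes, roughly:
    #   apt-get --assume-yes --option=Dpkg::Options::=--force-confold \
    #       install nagios3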
@@ -289,21 +287,21 @@ def apt_upgrade(options=None, fatal=False, dist=False):
:raises: subprocess.CalledProcessError
"""
if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
+ options = ["--option=Dpkg::Options::=--force-confold"]
- cmd = ['apt-get', '--assume-yes']
+ cmd = ["apt-get", "--assume-yes"]
cmd.extend(options)
if dist:
- cmd.append('dist-upgrade')
+ cmd.append("dist-upgrade")
else:
- cmd.append('upgrade')
+ cmd.append("upgrade")
log("Upgrading with options: {}".format(options))
_run_apt_command(cmd, fatal)
def apt_update(fatal=False):
"""Update local apt cache."""
- cmd = ['apt-get', 'update']
+ cmd = ["apt-get", "update"]
_run_apt_command(cmd, fatal)
@@ -317,7 +315,7 @@ def apt_purge(packages, fatal=False):
:type fatal: bool
:raises: subprocess.CalledProcessError
"""
- cmd = ['apt-get', '--assume-yes', 'purge']
+ cmd = ["apt-get", "--assume-yes", "purge"]
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
@@ -335,16 +333,16 @@ def apt_autoremove(purge=True, fatal=False):
:type fatal: bool
:raises: subprocess.CalledProcessError
"""
- cmd = ['apt-get', '--assume-yes', 'autoremove']
+ cmd = ["apt-get", "--assume-yes", "autoremove"]
if purge:
- cmd.append('--purge')
+ cmd.append("--purge")
_run_apt_command(cmd, fatal)
def apt_mark(packages, mark, fatal=False):
"""Flag one or more packages using apt-mark."""
log("Marking {} as {}".format(packages, mark))
- cmd = ['apt-mark', mark]
+ cmd = ["apt-mark", mark]
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
@@ -357,11 +355,11 @@ def apt_mark(packages, mark, fatal=False):
def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
+ return apt_mark(packages, "hold", fatal=fatal)
def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
+ return apt_mark(packages, "unhold", fatal=fatal)
def import_key(key):
@@ -382,16 +380,18 @@ def import_key(key):
:raises: GPGKeyError if the key could not be imported
"""
key = key.strip()
- if '-' in key or '\n' in key:
+ if "-" in key or "\n" in key:
# Send everything not obviously a keyid to GPG to import, as
# we trust its validation better than our own. eg. handling
# comments before the key.
log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
- if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
- '-----END PGP PUBLIC KEY BLOCK-----' in key):
+ if (
+ "-----BEGIN PGP PUBLIC KEY BLOCK-----" in key
+ and "-----END PGP PUBLIC KEY BLOCK-----" in key
+ ):
log("Writing provided PGP key in the binary format", level=DEBUG)
if six.PY3:
- key_bytes = key.encode('utf-8')
+ key_bytes = key.encode("utf-8")
else:
key_bytes = key
key_name = _get_keyid_by_gpg_key(key_bytes)
@@ -401,8 +401,10 @@ def import_key(key):
raise GPGKeyError("ASCII armor markers missing from GPG key")
else:
log("PGP key found (looks like Radix64 format)", level=WARNING)
- log("SECURELY importing PGP key from keyserver; "
- "full key not provided.", level=WARNING)
+ log(
+ "SECURELY importing PGP key from keyserver; " "full key not provided.",
+ level=WARNING,
+ )
# as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
# to retrieve GPG keys. `apt-key adv` command is deprecated as is
# apt-key in general as noted in its manpage. See lp:1433761 for more
@@ -427,17 +429,19 @@ def _get_keyid_by_gpg_key(key_material):
:rtype: str
"""
# Use the same gpg command for both Xenial and Bionic
- cmd = 'gpg --with-colons --with-fingerprint'
- ps = subprocess.Popen(cmd.split(),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- stdin=subprocess.PIPE)
+ cmd = "gpg --with-colons --with-fingerprint"
+ ps = subprocess.Popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
out, err = ps.communicate(input=key_material)
if six.PY3:
- out = out.decode('utf-8')
- err = err.decode('utf-8')
- if 'gpg: no valid OpenPGP data found.' in err:
- raise GPGKeyError('Invalid GPG key material provided')
+ out = out.decode("utf-8")
+ err = err.decode("utf-8")
+ if "gpg: no valid OpenPGP data found." in err:
+ raise GPGKeyError("Invalid GPG key material provided")
# from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
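The fingerprint is pulled out of gpg's machine-readable colon format; a sketch of the kind of line the regex matches (fingerprint value illustrative):

    # fpr:::::::::0123456789ABCDEF0123456789ABCDEF01234567:
    # "fpr" plus nine colons, then the 40 hex digits captured by group(1).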
@@ -470,12 +474,13 @@ def _get_key_by_keyid(keyid):
:raises: subprocess.CalledProcessError
"""
# options=mr - machine-readable output (disables html wrappers)
- keyserver_url = ('https://keyserver.ubuntu.com'
- '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
- curl_cmd = ['curl', keyserver_url.format(keyid)]
+ keyserver_url = (
+ "https://keyserver.ubuntu.com"
+ "/pks/lookup?op=get&options=mr&exact=on&search=0x{}"
+ )
+ curl_cmd = ["curl", keyserver_url.format(keyid)]
# use proxy server settings in order to retrieve the key
- return subprocess.check_output(curl_cmd,
- env=env_proxy_settings(['https']))
+ return subprocess.check_output(curl_cmd, env=env_proxy_settings(["https"]))
def _dearmor_gpg_key(key_asc):
@@ -487,18 +492,22 @@ def _dearmor_gpg_key(key_asc):
:rtype: (str, bytes)
:raises: GPGKeyError
"""
- ps = subprocess.Popen(['gpg', '--dearmor'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- stdin=subprocess.PIPE)
+ ps = subprocess.Popen(
+ ["gpg", "--dearmor"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
out, err = ps.communicate(input=key_asc)
# no need to decode output as it is binary (invalid utf-8), only error
if six.PY3:
- err = err.decode('utf-8')
- if 'gpg: no valid OpenPGP data found.' in err:
- raise GPGKeyError('Invalid GPG key material. Check your network setup'
- ' (MTU, routing, DNS) and/or proxy server settings'
- ' as well as destination keyserver status.')
+ err = err.decode("utf-8")
+ if "gpg: no valid OpenPGP data found." in err:
+ raise GPGKeyError(
+ "Invalid GPG key material. Check your network setup"
+ " (MTU, routing, DNS) and/or proxy server settings"
+ " as well as destination keyserver status."
+ )
else:
return out
@@ -511,8 +520,7 @@ def _write_apt_gpg_keyfile(key_name, key_material):
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
"""
- with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
- 'wb') as keyf:
+ with open("/etc/apt/trusted.gpg.d/{}.gpg".format(key_name), "wb") as keyf:
keyf.write(key_material)
@@ -573,18 +581,20 @@ def add_source(source, key=None, fail_invalid=False):
@raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
valid pocket in CLOUD_ARCHIVE_POCKETS
"""
- _mapping = OrderedDict([
- (r"^distro$", lambda: None), # This is a NOP
- (r"^(?:proposed|distro-proposed)$", _add_proposed),
- (r"^cloud-archive:(.*)$", _add_apt_repository),
- (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
- (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
- (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
- (r"^cloud:(.*)$", _add_cloud_pocket),
- (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
- ])
+ _mapping = OrderedDict(
+ [
+ (r"^distro$", lambda: None), # This is a NOP
+ (r"^(?:proposed|distro-proposed)$", _add_proposed),
+ (r"^cloud-archive:(.*)$", _add_apt_repository),
+ (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+ (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+ (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+ (r"^cloud:(.*)$", _add_cloud_pocket),
+ (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+ ]
+ )
if source is None:
- source = ''
+ source = ""
for r, fn in six.iteritems(_mapping):
m = re.match(r, source)
if m:
@@ -619,9 +629,10 @@ def _add_proposed():
release = get_distrib_codename()
arch = platform.machine()
if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
- raise SourceConfigError("Arch {} not supported for (distro-)proposed"
- .format(arch))
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+ raise SourceConfigError(
+ "Arch {} not supported for (distro-)proposed".format(arch)
+ )
+ with open("/etc/apt/sources.list.d/proposed.list", "w") as apt:
apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
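The OrderedDict in add_source dispatches on the first pattern that matches, passing any regex groups as positional arguments; an illustrative routing under that scheme:

    # "cloud:bionic-train" misses the /staging pattern, matches
    # r"^cloud:(.*)-(.*)$", and calls _add_cloud_distro_check("bionic", "train");
    # a bare "proposed" matches r"^(?:proposed|distro-proposed)$" and calls
    # _add_proposed() with no arguments.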
@@ -631,14 +642,15 @@ def _add_apt_repository(spec):
:param spec: the parameter to pass to add_apt_repository
:type spec: str
"""
- if '{series}' in spec:
+ if "{series}" in spec:
series = get_distrib_codename()
- spec = spec.replace('{series}', series)
+ spec = spec.replace("{series}", series)
# software-properties package for bionic properly reacts to proxy settings
# passed as environment variables (See lp:1433761). This is not the case
# LTS and non-LTS releases below bionic.
- _run_with_retries(['add-apt-repository', '--yes', spec],
- cmd_env=env_proxy_settings(['https']))
+ _run_with_retries(
+ ["add-apt-repository", "--yes", spec], cmd_env=env_proxy_settings(["https"])
+ )
def _add_cloud_pocket(pocket):
@@ -653,14 +665,11 @@ def _add_cloud_pocket(pocket):
:raises: SourceConfigError if the cloud pocket doesn't exist or the
requested release doesn't match the current distro version.
"""
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
+ apt_install(filter_installed_packages(["ubuntu-cloud-keyring"]), fatal=True)
if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
+ raise SourceConfigError("Unsupported cloud: source option %s" % pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
+ with open("/etc/apt/sources.list.d/cloud-archive.list", "w") as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
@@ -677,9 +686,9 @@ def _add_cloud_staging(cloud_archive_release, openstack_release):
current version of the os.
"""
_verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
- ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
- cmd = 'add-apt-repository -y {}'.format(ppa)
- _run_with_retries(cmd.split(' '))
+ ppa = "ppa:ubuntu-cloud-archive/{}-staging".format(openstack_release)
+ cmd = "add-apt-repository -y {}".format(ppa)
+ _run_with_retries(cmd.split(" "))
def _add_cloud_distro_check(cloud_archive_release, openstack_release):
@@ -710,12 +719,18 @@ def _verify_is_ubuntu_rel(release, os_release):
ubuntu_rel = get_distrib_codename()
if release != ubuntu_rel:
raise SourceConfigError(
- 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'
- 'version ({})'.format(release, os_release, ubuntu_rel))
-
-
-def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
- retry_message="", cmd_env=None):
+ "Invalid Cloud Archive release specified: {}-{} on this Ubuntu"
+ "version ({})".format(release, os_release, ubuntu_rel)
+ )
+
+
+def _run_with_retries(
+ cmd,
+ max_retries=CMD_RETRY_COUNT,
+ retry_exitcodes=(1,),
+ retry_message="",
+ cmd_env=None,
+):
"""Run a command and retry until success or max_retries is reached.
:param cmd: The apt command to run.
@@ -766,8 +781,10 @@ def _run_apt_command(cmd, fatal=False):
"""
if fatal:
_run_with_retries(
- cmd, retry_exitcodes=(1, APT_NO_LOCK,),
- retry_message="Couldn't acquire DPKG lock")
+ cmd,
+ retry_exitcodes=(1, APT_NO_LOCK),
+ retry_message="Couldn't acquire DPKG lock",
+ )
else:
subprocess.call(cmd, env=get_apt_dpkg_env())
@@ -801,5 +818,7 @@ def get_apt_dpkg_env():
"""
# The fallback is used in the event of ``/etc/environment`` not containing
# a valid PATH variable.
- return {'DEBIAN_FRONTEND': 'noninteractive',
- 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')}
+ return {
+ "DEBIAN_FRONTEND": "noninteractive",
+ "PATH": get_system_env("PATH", "/usr/sbin:/usr/bin:/sbin:/bin"),
+ }
diff --git a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
index 929a75d..b80f196 100644
--- a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
+++ b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
@@ -43,6 +43,7 @@ import sys
class _container(dict):
"""Simple container for attributes."""
+
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
@@ -57,6 +58,7 @@ class Version(_container):
class Cache(object):
"""Simulation of ``apt_pkg`` Cache object."""
+
def __init__(self, progress=None):
pass
@@ -77,15 +79,15 @@ class Cache(object):
:raises: KeyError, subprocess.CalledProcessError
"""
apt_result = self._apt_cache_show([package])[package]
- apt_result['name'] = apt_result.pop('package')
+ apt_result["name"] = apt_result.pop("package")
pkg = Package(apt_result)
dpkg_result = self._dpkg_list([package]).get(package, {})
current_ver = None
- installed_version = dpkg_result.get('version')
+ installed_version = dpkg_result.get("version")
if installed_version:
- current_ver = Version({'ver_str': installed_version})
+ current_ver = Version({"ver_str": installed_version})
pkg.current_ver = current_ver
- pkg.architecture = dpkg_result.get('architecture')
+ pkg.architecture = dpkg_result.get("architecture")
return pkg
def _dpkg_list(self, packages):
@@ -99,17 +101,17 @@ class Cache(object):
:raises: subprocess.CalledProcessError
"""
pkgs = {}
- cmd = ['dpkg-query', '--list']
+ cmd = ["dpkg-query", "--list"]
cmd.extend(packages)
if locale.getlocale() == (None, None):
# subprocess calls out to locale.getpreferredencoding(False) to
# determine encoding. Workaround for Trusty where the
# environment appears to not be set up correctly.
- locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+ locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
try:
- output = subprocess.check_output(cmd,
- stderr=subprocess.STDOUT,
- universal_newlines=True)
+ output = subprocess.check_output(
+ cmd, stderr=subprocess.STDOUT, universal_newlines=True
+ )
except subprocess.CalledProcessError as cp:
# ``dpkg-query`` may return error and at the same time have
# produced useful output, for example when asked for multiple
@@ -119,22 +121,25 @@ class Cache(object):
output = cp.output
headings = []
for line in output.splitlines():
- if line.startswith('||/'):
+ if line.startswith("||/"):
headings = line.split()
headings.pop(0)
continue
- elif (line.startswith('|') or line.startswith('+') or
- line.startswith('dpkg-query:')):
+ elif (
+ line.startswith("|")
+ or line.startswith("+")
+ or line.startswith("dpkg-query:")
+ ):
continue
else:
data = line.split(None, 4)
status = data.pop(0)
- if status != 'ii':
+ if status != "ii":
continue
pkg = {}
pkg.update({k.lower(): v for k, v in zip(headings, data)})
- if 'name' in pkg:
- pkgs.update({pkg['name']: pkg})
+ if "name" in pkg:
+ pkgs.update({pkg["name"]: pkg})
return pkgs
def _apt_cache_show(self, packages):
@@ -148,33 +153,33 @@ class Cache(object):
:raises: subprocess.CalledProcessError
"""
pkgs = {}
- cmd = ['apt-cache', 'show', '--no-all-versions']
+ cmd = ["apt-cache", "show", "--no-all-versions"]
cmd.extend(packages)
if locale.getlocale() == (None, None):
# subprocess calls out to locale.getpreferredencoding(False) to
# determine encoding. Workaround for Trusty where the
# environment appears to not be set up correctly.
- locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+ locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
try:
- output = subprocess.check_output(cmd,
- stderr=subprocess.STDOUT,
- universal_newlines=True)
+ output = subprocess.check_output(
+ cmd, stderr=subprocess.STDOUT, universal_newlines=True
+ )
previous = None
pkg = {}
for line in output.splitlines():
if not line:
- if 'package' in pkg:
- pkgs.update({pkg['package']: pkg})
+ if "package" in pkg:
+ pkgs.update({pkg["package"]: pkg})
pkg = {}
continue
- if line.startswith(' '):
+ if line.startswith(" "):
if previous and previous in pkg:
pkg[previous] += os.linesep + line.lstrip()
continue
- if ':' in line:
- kv = line.split(':', 1)
+ if ":" in line:
+ kv = line.split(":", 1)
key = kv[0].lower()
- if key == 'n':
+ if key == "n":
continue
previous = key
pkg.update({key: kv[1].lstrip()})
@@ -192,14 +197,14 @@ class Config(_container):
def _populate(self):
cfgs = {}
- cmd = ['apt-config', 'dump']
- output = subprocess.check_output(cmd,
- stderr=subprocess.STDOUT,
- universal_newlines=True)
+ cmd = ["apt-config", "dump"]
+ output = subprocess.check_output(
+ cmd, stderr=subprocess.STDOUT, universal_newlines=True
+ )
for line in output.splitlines():
if not line.startswith("CommandLine"):
k, v = line.split(" ", 1)
- cfgs[k] = v.strip(";").strip("\"")
+ cfgs[k] = v.strip(";").strip('"')
return cfgs
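_populate keeps every apt-config dump line except the CommandLine entries, stripping the trailing semicolon and surrounding quotes; an illustrative line and its parsed form:

    # APT::Architecture "amd64";
    # parses to: cfgs["APT::Architecture"] == "amd64"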
@@ -225,8 +230,8 @@ def upstream_version(version):
:rtype: str
"""
if version:
- version = version.split(':')[-1]
- version = version.split('-')[0]
+ version = version.split(":")[-1]
+ version = version.split("-")[0]
return version
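A doctest-style sketch of what the two splits strip (the epoch before ":", then the Debian revision after "-"):

    >>> upstream_version("2:1.2.3-0ubuntu1")
    '1.2.3'
    >>> upstream_version(None) is None
    True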
@@ -250,18 +255,21 @@ def version_compare(a, b):
:rtype: int
:raises: subprocess.CalledProcessError, RuntimeError
"""
- for op in ('gt', 1), ('eq', 0), ('lt', -1):
+ for op in ("gt", 1), ("eq", 0), ("lt", -1):
try:
- subprocess.check_call(['dpkg', '--compare-versions',
- a, op[0], b],
- stderr=subprocess.STDOUT,
- universal_newlines=True)
+ subprocess.check_call(
+ ["dpkg", "--compare-versions", a, op[0], b],
+ stderr=subprocess.STDOUT,
+ universal_newlines=True,
+ )
return op[1]
except subprocess.CalledProcessError as cp:
if cp.returncode == 1:
continue
raise
else:
- raise RuntimeError('Unable to compare "{}" and "{}", according to '
- 'our logic they are neither greater, equal nor '
- 'less than each other.')
+ raise RuntimeError(
+ 'Unable to compare "{}" and "{}", according to '
+ "our logic they are neither greater, equal nor "
+ "less than each other."
+ )
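version_compare simply tries dpkg --compare-versions with gt, eq and lt in turn and returns the first operator that exits 0, giving the usual three-way contract:

    >>> version_compare("2.0", "1.9")   # a > b
    1
    >>> version_compare("1.0", "1.0")   # a == b
    0
    >>> version_compare("1.0", "2.0")   # a < b
    -1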
diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py
index 78c81af..4433b60 100644
--- a/hooks/charmhelpers/osplatform.py
+++ b/hooks/charmhelpers/osplatform.py
@@ -11,7 +11,7 @@ def get_platform():
"""
# linux_distribution is deprecated and will be removed in Python 3.7
# Warnings *not* disabled, as we certainly need to fix this.
- if hasattr(platform, 'linux_distribution'):
+ if hasattr(platform, "linux_distribution"):
tuple_platform = platform.linux_distribution()
current_platform = tuple_platform[0]
else:
@@ -29,17 +29,16 @@ def get_platform():
# ElementaryOS fails to run tests locally without this.
return "ubuntu"
else:
- raise RuntimeError("This module is not supported on {}."
- .format(current_platform))
+ raise RuntimeError(
+ "This module is not supported on {}.".format(current_platform)
+ )
def _get_platform_from_fs():
"""Get Platform from /etc/os-release."""
- with open(os.path.join(os.sep, 'etc', 'os-release')) as fin:
+ with open(os.path.join(os.sep, "etc", "os-release")) as fin:
content = dict(
- line.split('=', 1)
- for line in fin.read().splitlines()
- if '=' in line
+ line.split("=", 1) for line in fin.read().splitlines() if "=" in line
)
for k, v in content.items():
content[k] = v.strip('"')
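_get_platform_from_fs builds its dict straight from the key=value lines of /etc/os-release, with the quote-stripping above; given a file containing the two (illustrative) lines below:

    # NAME="Ubuntu"
    # ID=ubuntu
    # the generator yields {"NAME": '"Ubuntu"', "ID": "ubuntu"} and the
    # strip('"') pass leaves {"NAME": "Ubuntu", "ID": "ubuntu"}.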
diff --git a/hooks/common.py b/hooks/common.py
index ec34670..cd71ec1 100644
--- a/hooks/common.py
+++ b/hooks/common.py
@@ -16,19 +16,19 @@ from charmhelpers.core.hookenv import (
from pynag import Model
-INPROGRESS_DIR = '/etc/nagios3-inprogress'
-INPROGRESS_CFG = '/etc/nagios3-inprogress/nagios.cfg'
-INPROGRESS_CONF_D = '/etc/nagios3-inprogress/conf.d'
-CHARM_CFG = '/etc/nagios3-inprogress/conf.d/charm.cfg'
-MAIN_NAGIOS_BAK = '/etc/nagios3.bak'
-MAIN_NAGIOS_DIR = '/etc/nagios3'
-MAIN_NAGIOS_CFG = '/etc/nagios3/nagios.cfg'
-PLUGIN_PATH = '/usr/lib/nagios/plugins'
+INPROGRESS_DIR = "/etc/nagios3-inprogress"
+INPROGRESS_CFG = "/etc/nagios3-inprogress/nagios.cfg"
+INPROGRESS_CONF_D = "/etc/nagios3-inprogress/conf.d"
+CHARM_CFG = "/etc/nagios3-inprogress/conf.d/charm.cfg"
+MAIN_NAGIOS_BAK = "/etc/nagios3.bak"
+MAIN_NAGIOS_DIR = "/etc/nagios3"
+MAIN_NAGIOS_CFG = "/etc/nagios3/nagios.cfg"
+PLUGIN_PATH = "/usr/lib/nagios/plugins"
Model.cfg_file = INPROGRESS_CFG
Model.pynag_directory = INPROGRESS_CONF_D
-reduce_RE = re.compile(r'[\W_]')
+reduce_RE = re.compile(r"[\W_]")
def check_ip(n):
@@ -43,14 +43,14 @@ def check_ip(n):
return False
-def get_local_ingress_address(binding='website'):
+def get_local_ingress_address(binding="website"):
# using network-get to retrieve the address details if available.
- log('Getting hostname for binding %s' % binding)
+ log("Getting hostname for binding %s" % binding)
try:
network_info = network_get(binding)
- if network_info is not None and 'ingress-addresses' in network_info:
- log('Using ingress-addresses')
- hostname = network_info['ingress-addresses'][0]
+ if network_info is not None and "ingress-addresses" in network_info:
+ log("Using ingress-addresses")
+ hostname = network_info["ingress-addresses"][0]
log(hostname)
return hostname
except NotImplementedError:
@@ -60,11 +60,11 @@ def get_local_ingress_address(binding='website'):
# Pre 2.3 output
try:
hostname = network_get_primary_address(binding)
- log('Using primary-addresses')
+ log("Using primary-addresses")
except NotImplementedError:
# pre Juju 2.0
- hostname = unit_get('private-address')
- log('Using unit_get private address')
+ hostname = unit_get("private-address")
+ log("Using unit_get private address")
log(hostname)
return hostname
@@ -72,14 +72,14 @@ def get_local_ingress_address(binding='website'):
def get_remote_relation_attr(remote_unit, attr_name, relation_id=None):
args = ["relation-get", attr_name, remote_unit]
if relation_id is not None:
- args.extend(['-r', relation_id])
+ args.extend(["-r", relation_id])
return subprocess.check_output(args).strip()
def get_ip_and_hostname(remote_unit, relation_id=None):
- hostname = get_remote_relation_attr(remote_unit, 'ingress-address', relation_id)
+ hostname = get_remote_relation_attr(remote_unit, "ingress-address", relation_id)
if hostname is None or not len(hostname):
- hostname = get_remote_relation_attr(remote_unit, 'private-address', relation_id)
+ hostname = get_remote_relation_attr(remote_unit, "private-address", relation_id)
if hostname is None or not len(hostname):
log("relation-get failed")
@@ -89,18 +89,18 @@ def get_ip_and_hostname(remote_unit, relation_id=None):
ip_address = hostname
else:
ip_address = socket.getaddrinfo(hostname, None)[0][4][0]
- return (ip_address, remote_unit.replace('/', '-'))
+ return (ip_address, remote_unit.replace("/", "-"))
def refresh_hostgroups(): # noqa:C901
""" Not the most efficient thing but since we're only
parsing what is already on disk here it's not too bad """
- hosts = [x['host_name'] for x in Model.Host.objects.all if x['host_name']]
+ hosts = [x["host_name"] for x in Model.Host.objects.all if x["host_name"]]
hgroups = {}
for host in hosts:
try:
- (service, unit_id) = host.rsplit('-', 1)
+ (service, unit_id) = host.rsplit("-", 1)
except ValueError:
continue
if service in hgroups:
@@ -109,8 +109,8 @@ def refresh_hostgroups(): # noqa:C901
hgroups[service] = [host]
# Find existing autogenerated
- auto_hgroups = Model.Hostgroup.objects.filter(notes__contains='#autogenerated#')
- auto_hgroups = [x.get_attribute('hostgroup_name') for x in auto_hgroups]
+ auto_hgroups = Model.Hostgroup.objects.filter(notes__contains="#autogenerated#")
+ auto_hgroups = [x.get_attribute("hostgroup_name") for x in auto_hgroups]
# Delete the ones not in hgroups
to_delete = set(auto_hgroups).difference(set(hgroups.keys()))
@@ -127,10 +127,10 @@ def refresh_hostgroups(): # noqa:C901
except (ValueError, KeyError):
hgroup = Model.Hostgroup()
hgroup.set_filename(CHARM_CFG)
- hgroup.set_attribute('hostgroup_name', hgroup_name)
- hgroup.set_attribute('notes', '#autogenerated#')
+ hgroup.set_attribute("hostgroup_name", hgroup_name)
+ hgroup.set_attribute("notes", "#autogenerated#")
- hgroup.set_attribute('members', ','.join(members))
+ hgroup.set_attribute("members", ",".join(members))
hgroup.save()
@@ -138,15 +138,14 @@ def _make_check_command(args):
args = [str(arg) for arg in args]
# There is some worry of collision, but the uniqueness of the initial
# command should be enough.
- signature = reduce_RE.sub('_', ''.join(
- [os.path.basename(arg) for arg in args]))
+ signature = reduce_RE.sub("_", "".join([os.path.basename(arg) for arg in args]))
Model.Command.objects.reload_cache()
try:
cmd = Model.Command.objects.get_by_shortname(signature)
except (ValueError, KeyError):
cmd = Model.Command()
- cmd.set_attribute('command_name', signature)
- cmd.set_attribute('command_line', ' '.join(args))
+ cmd.set_attribute("command_name", signature)
+ cmd.set_attribute("command_line", " ".join(args))
cmd.save()
return signature
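The signature is just the basenames of the command words with every non-alphanumeric character squashed to an underscore, which keeps the generated Nagios command_name safe; a worked sketch:

    >>> args = ["/usr/lib/nagios/plugins/check_http", "-p", '"$ARG1$"']
    >>> reduce_RE.sub("_", "".join([os.path.basename(arg) for arg in args]))
    'check_http_p__ARG1__'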
@@ -159,130 +158,131 @@ def _extend_args(args, cmd_args, switch, value):
def customize_http(service, name, extra):
args = []
cmd_args = []
- plugin = os.path.join(PLUGIN_PATH, 'check_http')
- port = extra.get('port', 80)
- path = extra.get('path', '/')
+ plugin = os.path.join(PLUGIN_PATH, "check_http")
+ port = extra.get("port", 80)
+ path = extra.get("path", "/")
args = [port, path]
- cmd_args = [plugin, '-p', '"$ARG1$"', '-u', '"$ARG2$"']
- if 'status' in extra:
- _extend_args(args, cmd_args, '-e', extra['status'])
- if 'host' in extra:
- _extend_args(args, cmd_args, '-H', extra['host'])
- cmd_args.extend(('-I', '$HOSTADDRESS$'))
+ cmd_args = [plugin, "-p", '"$ARG1$"', "-u", '"$ARG2$"']
+ if "status" in extra:
+ _extend_args(args, cmd_args, "-e", extra["status"])
+ if "host" in extra:
+ _extend_args(args, cmd_args, "-H", extra["host"])
+ cmd_args.extend(("-I", "$HOSTADDRESS$"))
else:
- cmd_args.extend(('-H', '$HOSTADDRESS$'))
- check_timeout = config('check_timeout')
+ cmd_args.extend(("-H", "$HOSTADDRESS$"))
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
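The saved check_command is the generated command name plus the positional args joined with "!", which is how Nagios substitutes $ARG1$ and $ARG2$ back into the command line; for a default HTTP check (port 80, path "/", no check_timeout configured) the stored value comes out along these lines:

    # check_command  check_http_p__ARG1___u__ARG2___H__HOSTADDRESS_!80!/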
def customize_mysql(service, name, extra):
- plugin = os.path.join(PLUGIN_PATH, 'check_mysql')
+ plugin = os.path.join(PLUGIN_PATH, "check_mysql")
args = []
- cmd_args = [plugin, '-H', '$HOSTADDRESS$']
- if 'user' in extra:
- _extend_args(args, cmd_args, '-u', extra['user'])
- if 'password' in extra:
- _extend_args(args, cmd_args, '-p', extra['password'])
- check_timeout = config('check_timeout')
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$"]
+ if "user" in extra:
+ _extend_args(args, cmd_args, "-u", extra["user"])
+ if "password" in extra:
+ _extend_args(args, cmd_args, "-p", extra["password"])
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_pgsql(service, name, extra):
- plugin = os.path.join(PLUGIN_PATH, 'check_pgsql')
+ plugin = os.path.join(PLUGIN_PATH, "check_pgsql")
args = []
- cmd_args = [plugin, '-H', '$HOSTADDRESS$']
- check_timeout = config('check_timeout')
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$"]
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_nrpe(service, name, extra):
- plugin = os.path.join(PLUGIN_PATH, 'check_nrpe')
+ plugin = os.path.join(PLUGIN_PATH, "check_nrpe")
args = []
- cmd_args = [plugin, '-H', '$HOSTADDRESS$']
- if name in ('mem', 'swap'):
- cmd_args.extend(('-c', 'check_%s' % name))
- elif 'command' in extra:
- cmd_args.extend(('-c', extra['command']))
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$"]
+ if name in ("mem", "swap"):
+ cmd_args.extend(("-c", "check_%s" % name))
+ elif "command" in extra:
+ cmd_args.extend(("-c", extra["command"]))
else:
- cmd_args.extend(('-c', extra))
- check_timeout = config('check_timeout')
+ cmd_args.extend(("-c", extra))
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_rpc(service, name, extra):
""" Customize the check_rpc plugin to check things like nfs."""
- plugin = os.path.join(PLUGIN_PATH, 'check_rpc')
+ plugin = os.path.join(PLUGIN_PATH, "check_rpc")
args = []
# /usr/lib/nagios/plugins/check_rpc -H <host> -C <rpc_command>
- cmd_args = [plugin, '-H', '$HOSTADDRESS$']
- if 'rpc_command' in extra:
- cmd_args.extend(('-C', extra['rpc_command']))
- if 'program_version' in extra:
- cmd_args.extend(('-c', extra['program_version']))
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$"]
+ if "rpc_command" in extra:
+ cmd_args.extend(("-C", extra["rpc_command"]))
+ if "program_version" in extra:
+ cmd_args.extend(("-c", extra["program_version"]))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_tcp(service, name, extra):
""" Customize tcp can be used to check things like memcached. """
- plugin = os.path.join(PLUGIN_PATH, 'check_tcp')
+ plugin = os.path.join(PLUGIN_PATH, "check_tcp")
args = []
# /usr/lib/nagios/plugins/check_tcp -H <host> -E
- cmd_args = [plugin, '-H', '$HOSTADDRESS$', '-E']
- if 'port' in extra:
- cmd_args.extend(('-p', extra['port']))
- if 'string' in extra:
- cmd_args.extend(('-s', "'{}'".format(extra['string'])))
- if 'expect' in extra:
- cmd_args.extend(('-e', extra['expect']))
- if 'warning' in extra:
- cmd_args.extend(('-w', extra['warning']))
- if 'critical' in extra:
- cmd_args.extend(('-c', extra['critical']))
- if 'timeout' in extra:
- cmd_args.extend(('-t', extra['timeout']))
- check_timeout = config('check_timeout')
+ cmd_args = [plugin, "-H", "$HOSTADDRESS$", "-E"]
+ if "port" in extra:
+ cmd_args.extend(("-p", extra["port"]))
+ if "string" in extra:
+ cmd_args.extend(("-s", "'{}'".format(extra["string"])))
+ if "expect" in extra:
+ cmd_args.extend(("-e", extra["expect"]))
+ if "warning" in extra:
+ cmd_args.extend(("-w", extra["warning"]))
+ if "critical" in extra:
+ cmd_args.extend(("-c", extra["critical"]))
+ if "timeout" in extra:
+ cmd_args.extend(("-t", extra["timeout"]))
+ check_timeout = config("check_timeout")
if check_timeout is not None:
- cmd_args.extend(('-t', check_timeout))
+ cmd_args.extend(("-t", check_timeout))
check_command = _make_check_command(cmd_args)
- cmd = '%s!%s' % (check_command, '!'.join([str(x) for x in args]))
- service.set_attribute('check_command', cmd)
+ cmd = "%s!%s" % (check_command, "!".join([str(x) for x in args]))
+ service.set_attribute("check_command", cmd)
return True
def customize_service(service, family, name, extra):
""" The monitors.yaml names are mapped to methods that customize services. """
- customs = {'http': customize_http,
- 'mysql': customize_mysql,
- 'nrpe': customize_nrpe,
- 'tcp': customize_tcp,
- 'rpc': customize_rpc,
- 'pgsql': customize_pgsql,
- }
+ customs = {
+ "http": customize_http,
+ "mysql": customize_mysql,
+ "nrpe": customize_nrpe,
+ "tcp": customize_tcp,
+ "rpc": customize_rpc,
+ "pgsql": customize_pgsql,
+ }
if family in customs:
return customs[family](service, name, extra)
return False
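customize_service is a straight dispatch table, and unknown families are rejected so the caller can skip saving the service; a usage sketch (service object and extras hypothetical):

    >>> customize_service(service, "http", "website", {"port": 80, "path": "/"})
    True
    >>> customize_service(service, "smtp", "mail", {})
    False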
@@ -292,14 +292,13 @@ def update_localhost():
""" Update the localhost definition to use the ubuntu icons."""
Model.cfg_file = MAIN_NAGIOS_CFG
- Model.pynag_directory = os.path.join(MAIN_NAGIOS_DIR, 'conf.d')
- hosts = Model.Host.objects.filter(host_name='localhost',
- object_type='host')
+ Model.pynag_directory = os.path.join(MAIN_NAGIOS_DIR, "conf.d")
+ hosts = Model.Host.objects.filter(host_name="localhost", object_type="host")
for host in hosts:
- host.icon_image = 'base/ubuntu.png'
- host.icon_image_alt = 'Ubuntu Linux'
- host.vrml_image = 'ubuntu.png'
- host.statusmap_image = 'base/ubuntu.gd2'
+ host.icon_image = "base/ubuntu.png"
+ host.icon_image_alt = "Ubuntu Linux"
+ host.vrml_image = "ubuntu.png"
+ host.statusmap_image = "base/ubuntu.gd2"
host.save()
@@ -309,13 +308,13 @@ def get_pynag_host(target_id, owner_unit=None, owner_relation=None):
except (ValueError, KeyError):
host = Model.Host()
host.set_filename(CHARM_CFG)
- host.set_attribute('host_name', target_id)
- host.set_attribute('use', 'generic-host')
+ host.set_attribute("host_name", target_id)
+ host.set_attribute("use", "generic-host")
# Adding the ubuntu icon image definitions to the host.
- host.set_attribute('icon_image', 'base/ubuntu.png')
- host.set_attribute('icon_image_alt', 'Ubuntu Linux')
- host.set_attribute('vrml_image', 'ubuntu.png')
- host.set_attribute('statusmap_image', 'base/ubuntu.gd2')
+ host.set_attribute("icon_image", "base/ubuntu.png")
+ host.set_attribute("icon_image_alt", "Ubuntu Linux")
+ host.set_attribute("vrml_image", "ubuntu.png")
+ host.set_attribute("statusmap_image", "base/ubuntu.gd2")
host.save()
host = Model.Host.objects.get_by_shortname(target_id)
apply_host_policy(target_id, owner_unit, owner_relation)
@@ -323,22 +322,23 @@ def get_pynag_host(target_id, owner_unit=None, owner_relation=None):
def get_pynag_service(target_id, service_name):
- services = Model.Service.objects.filter(host_name=target_id,
- service_description=service_name)
+ services = Model.Service.objects.filter(
+ host_name=target_id, service_description=service_name
+ )
if len(services) == 0:
service = Model.Service()
service.set_filename(CHARM_CFG)
- service.set_attribute('service_description', service_name)
- service.set_attribute('host_name', target_id)
- service.set_attribute('use', 'generic-service')
+ service.set_attribute("service_description", service_name)
+ service.set_attribute("host_name", target_id)
+ service.set_attribute("use", "generic-service")
else:
service = services[0]
return service
def apply_host_policy(target_id, owner_unit, owner_relation):
- ssh_service = get_pynag_service(target_id, 'SSH')
- ssh_service.set_attribute('check_command', 'check_ssh')
+ ssh_service = get_pynag_service(target_id, "SSH")
+ ssh_service.set_attribute("check_command", "check_ssh")
ssh_service.save()
@@ -381,5 +381,6 @@ def flush_inprogress_config():
if os.path.exists(MAIN_NAGIOS_DIR):
shutil.move(MAIN_NAGIOS_DIR, MAIN_NAGIOS_BAK)
shutil.move(INPROGRESS_DIR, MAIN_NAGIOS_DIR)
- # now that directory has been changed need to update the config file to reflect the real stuff..
+ # now that directory has been changed need to update the config file
+ # to reflect the real stuff..
_commit_in_config(INPROGRESS_DIR, MAIN_NAGIOS_DIR)
diff --git a/hooks/monitors_relation_changed.py b/hooks/monitors_relation_changed.py
index 5e4f664..404706c 100755
--- a/hooks/monitors_relation_changed.py
+++ b/hooks/monitors_relation_changed.py
@@ -28,7 +28,7 @@ from charmhelpers.core.hookenv import (
related_units,
relation_ids,
log,
- DEBUG
+ DEBUG,
)
from common import (
@@ -37,35 +37,30 @@ from common import (
get_pynag_service,
refresh_hostgroups,
initialize_inprogress_config,
- flush_inprogress_config
+ flush_inprogress_config,
)
-REQUIRED_REL_DATA_KEYS = [
- 'target-address',
- 'monitors',
- 'target-id',
-]
+REQUIRED_REL_DATA_KEYS = ["target-address", "monitors", "target-id"]
def _prepare_relation_data(unit, rid):
relation_data = relation_get(unit=unit, rid=rid)
if not relation_data:
- msg = (
- 'no relation data found for unit {} in relation {} - '
- 'skipping'.format(unit, rid)
+ msg = "no relation data found for unit {} in relation {} - " "skipping".format(
+ unit, rid
)
log(msg, level=DEBUG)
return {}
- if rid.split(':')[0] == 'nagios':
+ if rid.split(":")[0] == "nagios":
# Fake it for the more generic 'nagios' relation
- relation_data['target-id'] = unit.replace('/', '-')
- relation_data['monitors'] = {'monitors': {'remote': {}}}
+ relation_data["target-id"] = unit.replace("/", "-")
+ relation_data["monitors"] = {"monitors": {"remote": {}}}
- if not relation_data.get('target-address'):
- relation_data['target-address'] = ingress_address(unit=unit, rid=rid)
+ if not relation_data.get("target-address"):
+ relation_data["target-address"] = ingress_address(unit=unit, rid=rid)
for key in REQUIRED_REL_DATA_KEYS:
if not relation_data.get(key):
@@ -73,9 +68,8 @@ def _prepare_relation_data(unit, rid):
# the relation at first (e.g. gnocchi). After a few hook runs,
# though, they add the key. For this reason I think using a logging
# level higher than DEBUG could be misleading
- msg = (
- '{} not found for unit {} in relation {} - '
- 'skipping'.format(key, unit, rid)
+ msg = "{} not found for unit {} in relation {} - " "skipping".format(
+ key, unit, rid
)
log(msg, level=DEBUG)
return {}
@@ -85,7 +79,7 @@ def _prepare_relation_data(unit, rid):
def _collect_relation_data():
all_relations = defaultdict(dict)
- for relname in ['nagios', 'monitors']:
+ for relname in ["nagios", "monitors"]:
for relid in relation_ids(relname):
for unit in related_units(relid):
relation_data = _prepare_relation_data(unit=unit, rid=relid)
@@ -101,11 +95,10 @@ def main(argv): # noqa: C901
# context.
#
if len(argv) > 1:
- relation_settings = {'monitors': open(argv[1]).read(),
- 'target-id': argv[2]}
+ relation_settings = {"monitors": open(argv[1]).read(), "target-id": argv[2]}
if len(argv) > 3:
- relation_settings['target-address'] = argv[3]
- all_relations = {'monitors:99': {'testing/0': relation_settings}}
+ relation_settings["target-address"] = argv[3]
+ all_relations = {"monitors:99": {"testing/0": relation_settings}}
else:
all_relations = _collect_relation_data()
@@ -113,12 +106,12 @@ def main(argv): # noqa: C901
targets_with_addresses = set()
for relid, units in all_relations.iteritems():
for unit, relation_settings in units.items():
- if 'target-id' in relation_settings:
- targets_with_addresses.add(relation_settings['target-id'])
+ if "target-id" in relation_settings:
+ targets_with_addresses.add(relation_settings["target-id"])
new_all_relations = {}
for relid, units in all_relations.iteritems():
for unit, relation_settings in units.items():
- if relation_settings['target-id'] in targets_with_addresses:
+ if relation_settings["target-id"] in targets_with_addresses:
if relid not in new_all_relations:
new_all_relations[relid] = {}
new_all_relations[relid][unit] = relation_settings
@@ -129,21 +122,21 @@ def main(argv): # noqa: C901
all_hosts = {}
for relid, units in all_relations.items():
for unit, relation_settings in units.iteritems():
- machine_id = relation_settings.get('machine_id', None)
+ machine_id = relation_settings.get("machine_id", None)
if machine_id:
- all_hosts[machine_id] = relation_settings['target-id']
+ all_hosts[machine_id] = relation_settings["target-id"]
for relid, units in all_relations.items():
apply_relation_config(relid, units, all_hosts)
refresh_hostgroups()
flush_inprogress_config()
- os.system('service nagios3 reload')
+ os.system("service nagios3 reload")
-def apply_relation_config(relid, units, all_hosts): # noqa: C901
+def apply_relation_config(relid, units, all_hosts): # noqa: C901
for unit, relation_settings in units.iteritems():
- monitors = relation_settings['monitors']
- target_id = relation_settings['target-id']
- machine_id = relation_settings.get('machine_id', None)
+ monitors = relation_settings["monitors"]
+ target_id = relation_settings["target-id"]
+ machine_id = relation_settings.get("machine_id", None)
parent_host = None
if machine_id:
container_regex = re.compile(r"(\d+)/lx[cd]/\d+")
@@ -155,7 +148,7 @@ def apply_relation_config(relid, units, all_hosts): # noqa: C901
# If not set, we don't mess with it, as multiple services may feed
# monitors in for a particular address. Generally a primary will set
# this to its own private-address
- target_address = relation_settings.get('target-address', None)
+ target_address = relation_settings.get("target-address", None)
if type(monitors) != dict:
monitors = yaml.safe_load(monitors)
@@ -164,23 +157,24 @@ def apply_relation_config(relid, units, all_hosts): # noqa: C901
host = get_pynag_host(target_id)
if not target_address:
raise Exception("No Target Address provided by NRPE service!")
- host.set_attribute('address', target_address)
+ host.set_attribute("address", target_address)
if parent_host:
# We assume that we only want one parent and will overwrite any
# existing parents for this host.
- host.set_attribute('parents', parent_host)
+ host.set_attribute("parents", parent_host)
host.save()
- for mon_family, mons in monitors['monitors']['remote'].iteritems():
+ for mon_family, mons in monitors["monitors"]["remote"].iteritems():
for mon_name, mon in mons.iteritems():
- service_name = '%s-%s' % (target_id, mon_name)
+ service_name = "%s-%s" % (target_id, mon_name)
service = get_pynag_service(target_id, service_name)
if customize_service(service, mon_family, mon_name, mon):
service.save()
else:
- print('Ignoring %s due to unknown family %s' % (mon_name,
- mon_family))
+ print(
+ "Ignoring %s due to unknown family %s" % (mon_name, mon_family)
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main(sys.argv)
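As the hunk above shows, main() doubles as a manual test harness: given a monitors YAML file and a target id (plus an optional address) on the command line, it fabricates a single fake relation instead of querying Juju. An illustrative invocation (file name hypothetical):

    # ./hooks/monitors_relation_changed.py monitors.yaml testing-0 10.0.0.5
    # builds:
    #   {"monitors:99": {"testing/0": {"monitors": <contents of monitors.yaml>,
    #                                  "target-id": "testing-0",
    #                                  "target-address": "10.0.0.5"}}}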
diff --git a/hooks/upgrade_charm.py b/hooks/upgrade_charm.py
index 20957ba..5091f9a 100755
--- a/hooks/upgrade_charm.py
+++ b/hooks/upgrade_charm.py
@@ -6,6 +6,7 @@ import base64
from jinja2 import Template
import glob
import os
+
# import re
import pwd
import grp
@@ -22,27 +23,27 @@ from charmhelpers import fetch
from common import update_localhost
# Gather facts
-legacy_relations = hookenv.config('legacy')
-extra_config = hookenv.config('extraconfig')
-enable_livestatus = hookenv.config('enable_livestatus')
-livestatus_path = hookenv.config('livestatus_path')
-enable_pagerduty = hookenv.config('enable_pagerduty')
-pagerduty_key = hookenv.config('pagerduty_key')
-pagerduty_path = hookenv.config('pagerduty_path')
-notification_levels = hookenv.config('pagerduty_notification_levels')
-nagios_user = hookenv.config('nagios_user')
-nagios_group = hookenv.config('nagios_group')
-ssl_config = str(hookenv.config('ssl')).lower()
-charm_dir = os.environ['CHARM_DIR']
-cert_domain = hookenv.unit_get('public-address')
+legacy_relations = hookenv.config("legacy")
+extra_config = hookenv.config("extraconfig")
+enable_livestatus = hookenv.config("enable_livestatus")
+livestatus_path = hookenv.config("livestatus_path")
+enable_pagerduty = hookenv.config("enable_pagerduty")
+pagerduty_key = hookenv.config("pagerduty_key")
+pagerduty_path = hookenv.config("pagerduty_path")
+notification_levels = hookenv.config("pagerduty_notification_levels")
+nagios_user = hookenv.config("nagios_user")
+nagios_group = hookenv.config("nagios_group")
+ssl_config = str(hookenv.config("ssl")).lower()
+charm_dir = os.environ["CHARM_DIR"]
+cert_domain = hookenv.unit_get("public-address")
nagios_cfg = "/etc/nagios3/nagios.cfg"
nagios_cgi_cfg = "/etc/nagios3/cgi.cfg"
pagerduty_cfg = "/etc/nagios3/conf.d/pagerduty_nagios.cfg"
traps_cfg = "/etc/nagios3/conf.d/traps.cfg"
pagerduty_cron = "/etc/cron.d/nagios-pagerduty-flush"
-password = hookenv.config('password')
-ro_password = hookenv.config('ro-password')
-nagiosadmin = hookenv.config('nagiosadmin') or 'nagiosadmin'
+password = hookenv.config("password")
+ro_password = hookenv.config("ro-password")
+nagiosadmin = hookenv.config("nagiosadmin") or "nagiosadmin"
contactgroup_members = hookenv.config("contactgroup-members")
# this global var will collect contactgroup members that must be forced
@@ -60,11 +61,12 @@ def warn_legacy_relations():
in the future
"""
if legacy_relations is not None:
- hookenv.log("Relations have been radically changed."
- " The monitoring interface is not supported anymore.",
- "WARNING")
- hookenv.log("Please use the generic juju-info or the monitors interface",
- "WARNING")
+ hookenv.log(
+ "Relations have been radically changed."
+ " The monitoring interface is not supported anymore.",
+ "WARNING",
+ )
+ hookenv.log("Please use the generic juju-info or the monitors interface", "WARNING")
def parse_extra_contacts(yaml_string):
@@ -76,40 +78,44 @@ def parse_extra_contacts(yaml_string):
extra_contacts = []
# Valid characters for contact names
- valid_name_chars = string.ascii_letters + string.digits + '_-'
+ valid_name_chars = string.ascii_letters + string.digits + "_-"
try:
extra_contacts_raw = yaml.load(yaml_string, Loader=yaml.SafeLoader) or []
if not isinstance(extra_contacts_raw, list):
- raise ValueError('not a list')
+ raise ValueError("not a list")
for contact in extra_contacts_raw:
- if {'name', 'host', 'service'} > set(contact.keys()):
- hookenv.log('Contact {} is missing fields.'.format(contact),
- hookenv.WARNING)
+ if {"name", "host", "service"} > set(contact.keys()):
+ hookenv.log(
+ "Contact {} is missing fields.".format(contact), hookenv.WARNING
+ )
continue
- if set(contact['name']) > set(valid_name_chars):
- hookenv.log('Contact name {} is illegal'.format(contact['name']),
- hookenv.WARNING)
+ if set(contact["name"]) > set(valid_name_chars):
+ hookenv.log(
+ "Contact name {} is illegal".format(contact["name"]),
+ hookenv.WARNING,
+ )
continue
- if '\n' in (contact['host'] + contact['service']):
- hookenv.log('Line breaks not allowed in commands', hookenv.WARNING)
+ if "\n" in (contact["host"] + contact["service"]):
+ hookenv.log("Line breaks not allowed in commands", hookenv.WARNING)
continue
- contact['name'] = contact['name'].lower()
- contact['alias'] = contact['name'].capitalize()
+ contact["name"] = contact["name"].lower()
+ contact["alias"] = contact["name"].capitalize()
extra_contacts.append(contact)
except (ValueError, yaml.error.YAMLError) as e:
- hookenv.log('Invalid "extra_contacts" configuration: {}'.format(e),
- hookenv.WARNING)
+ hookenv.log(
+ 'Invalid "extra_contacts" configuration: {}'.format(e), hookenv.WARNING
+ )
if len(extra_contacts_raw) != len(extra_contacts):
hookenv.log(
- 'Invalid extra_contacts config, found {} contacts defined, '
- 'only {} were valid, check unit logs for '
- 'detailed errors'.format(len(extra_contacts_raw), len(extra_contacts))
+ "Invalid extra_contacts config, found {} contacts defined, "
+ "only {} were valid, check unit logs for "
+ "detailed errors".format(len(extra_contacts_raw), len(extra_contacts))
)
return extra_contacts
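parse_extra_contacts expects a YAML list of mappings, each carrying at least name, host and service keys; entries that fail the character or line-break checks are dropped with a warning. A minimal sketch of valid input (paths hypothetical):

    # - name: slack
    #   host: /usr/local/bin/slack-host-notify
    #   service: /usr/local/bin/slack-service-notify
    # yields one contact with name "slack" and alias "Slack".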
@@ -119,12 +125,12 @@ def parse_extra_contacts(yaml_string):
# proper nagios3 configuration file, otherwise remove the config
def write_extra_config():
# Be prejudiced about this - remove the file always.
- if host.file_hash('/etc/nagios3/conf.d/extra.cfg') is not None:
- os.remove('/etc/nagios3/conf.d/extra.cfg')
+ if host.file_hash("/etc/nagios3/conf.d/extra.cfg") is not None:
+ os.remove("/etc/nagios3/conf.d/extra.cfg")
# If we have a config, then write it. the hook reconfiguration will
# handle the details
if extra_config is not None:
- host.write_file('/etc/nagios3/conf.d/extra.cfg', extra_config)
+ host.write_file("/etc/nagios3/conf.d/extra.cfg", extra_config)
# Equivalent of mkdir -p, since we can't rely on
@@ -152,7 +158,7 @@ def enable_livestatus_config():
if enable_livestatus:
hookenv.log("Livestatus is enabled")
fetch.apt_update()
- fetch.apt_install('check-mk-livestatus')
+ fetch.apt_install("check-mk-livestatus")
# Make the directory and fix perms on it
hookenv.log("Fixing perms on livestatus_path")
@@ -170,8 +176,10 @@ def enable_livestatus_config():
os.chown(livestatus_dir, uid, gid)
st = os.stat(livestatus_path)
os.chmod(livestatus_path, st.st_mode | stat.S_IRGRP)
- os.chmod(livestatus_dir, st.st_mode | stat.S_IRGRP |
- stat.S_ISGID | stat.S_IXUSR | stat.S_IXGRP)
+ os.chmod(
+ livestatus_dir,
+ st.st_mode | stat.S_IRGRP | stat.S_ISGID | stat.S_IXUSR | stat.S_IXGRP,
+ )
def enable_pagerduty_config():
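The permission arithmetic in the livestatus hunk above is a bitwise OR over stat flags; spelled out with a concrete starting mode (the 0o640 is illustrative, not taken from a real unit):

import stat

mode = 0o640  # e.g. rw-r----- on the livestatus socket
extra = stat.S_IRGRP | stat.S_ISGID | stat.S_IXUSR | stat.S_IXGRP
print(oct(mode | extra))  # 0o2750: adds setgid, u+x and g+x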
@@ -180,34 +188,35 @@ def enable_pagerduty_config():
if enable_pagerduty:
hookenv.log("Pagerduty is enabled")
fetch.apt_update()
- fetch.apt_install('libhttp-parser-perl')
+ fetch.apt_install("libhttp-parser-perl")
env = os.environ
- proxy = env.get('JUJU_CHARM_HTTPS_PROXY') or env.get('https_proxy')
- proxy_switch = '--proxy {}'.format(proxy) if proxy else ''
+ proxy = env.get("JUJU_CHARM_HTTPS_PROXY") or env.get("https_proxy")
+ proxy_switch = "--proxy {}".format(proxy) if proxy else ""
# Ship the pagerduty_nagios.cfg file
- template_values = {'pagerduty_key': pagerduty_key,
- 'pagerduty_path': pagerduty_path,
- 'proxy_switch': proxy_switch,
- 'notification_levels': notification_levels}
-
- with open('hooks/templates/pagerduty_nagios_cfg.tmpl', 'r') as f:
+ template_values = {
+ "pagerduty_key": pagerduty_key,
+ "pagerduty_path": pagerduty_path,
+ "proxy_switch": proxy_switch,
+ "notification_levels": notification_levels,
+ }
+
+ with open("hooks/templates/pagerduty_nagios_cfg.tmpl", "r") as f:
templateDef = f.read()
t = Template(templateDef)
- with open(pagerduty_cfg, 'w') as f:
+ with open(pagerduty_cfg, "w") as f:
f.write(t.render(template_values))
- with open('hooks/templates/nagios-pagerduty-flush-cron.tmpl', 'r') as f2:
+ with open("hooks/templates/nagios-pagerduty-flush-cron.tmpl", "r") as f2:
templateDef = f2.read()
t2 = Template(templateDef)
- with open(pagerduty_cron, 'w') as f2:
+ with open(pagerduty_cron, "w") as f2:
f2.write(t2.render(template_values))
# Ship the pagerduty_nagios.pl script
- shutil.copy('files/pagerduty_nagios.pl',
- '/usr/local/bin/pagerduty_nagios.pl')
+ shutil.copy("files/pagerduty_nagios.pl", "/usr/local/bin/pagerduty_nagios.pl")
# Create the pagerduty queue dir
if not os.path.isdir(pagerduty_path):
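The template handling in this hunk (and the ones below) always follows the same read/render/write shape; condensed to its essence with an inline template string instead of the files under hooks/templates, and assuming Template is jinja2.Template as these hooks import it:

from jinja2 import Template

template_def = "define contactgroup {\n    members  {{ members }}\n}\n"
rendered = Template(template_def).render({"members": "admins, pagerduty"})
print(rendered)  # the charm writes the equivalent under /etc/nagios3/conf.d/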
@@ -228,13 +237,13 @@ def enable_pagerduty_config():
if enable_pagerduty:
# avoid duplicates
if "pagerduty" not in contactgroup_members:
- forced_contactgroup_members.append('pagerduty')
+ forced_contactgroup_members.append("pagerduty")
def enable_traps_config():
global forced_contactgroup_members
- send_traps_to = hookenv.config('send_traps_to')
+ send_traps_to = hookenv.config("send_traps_to")
if not send_traps_to:
if os.path.isfile(traps_cfg):
@@ -245,25 +254,23 @@ def enable_traps_config():
hookenv.log("Send traps feature is enabled, target address is %s" % send_traps_to)
if "managementstation" not in contactgroup_members:
- forced_contactgroup_members.append('managementstation')
+ forced_contactgroup_members.append("managementstation")
- template_values = { 'send_traps_to': send_traps_to }
+ template_values = {"send_traps_to": send_traps_to}
- with open('hooks/templates/traps.tmpl','r') as f:
+ with open("hooks/templates/traps.tmpl", "r") as f:
templateDef = f.read()
t = Template(templateDef)
- with open(traps_cfg, 'w') as f:
+ with open(traps_cfg, "w") as f:
f.write(t.render(template_values))
def update_contacts():
# Multiple Email Contacts
- admin_members = ''
+ admin_members = ""
contacts = []
- admin_email = list(
- filter(None, set(hookenv.config('admin_email').split(',')))
- )
+ admin_email = list(filter(None, set(hookenv.config("admin_email").split(","))))
if len(admin_email) == 0:
hookenv.log("admin_email is unset, this isn't valid config")
hookenv.status_set("blocked", "admin_email is not configured")
@@ -271,26 +278,18 @@ def update_contacts():
hookenv.status_set("active", "ready")
if len(admin_email) == 1:
hookenv.log("Setting one admin email address '%s'" % admin_email[0])
- contacts = [{
- 'contact_name': 'root',
- 'alias': 'Root',
- 'email': admin_email[0]
- }]
+ contacts = [{"contact_name": "root", "alias": "Root", "email": admin_email[0]}]
elif len(admin_email) > 1:
hookenv.log("Setting %d admin email addresses" % len(admin_email))
contacts = []
for email in admin_email:
- contact_name = email.replace('@', '').replace('.','').lower()
+ contact_name = email.replace("@", "").replace(".", "").lower()
contact_alias = contact_name.capitalize()
- contacts.append({
- 'contact_name': contact_name,
- 'alias': contact_alias,
- 'email': email
- })
+ contacts.append(
+ {"contact_name": contact_name, "alias": contact_alias, "email": email}
+ )
- admin_members = ', '.join([
- c['contact_name'] for c in contacts
- ])
+ admin_members = ", ".join([c["contact_name"] for c in contacts])
resulting_members = contactgroup_members
if admin_members:
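As a worked example of the contact-name derivation above, on a sample address:

email = "Admin.User@Example.com"
contact_name = email.replace("@", "").replace(".", "").lower()
print(contact_name)               # adminuserexamplecom
print(contact_name.capitalize())  # Adminuserexamplecom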
@@ -301,26 +300,40 @@ def update_contacts():
resulting_members = ",".join([resulting_members] + forced_contactgroup_members)
# Parse extra_contacts
- extra_contacts = parse_extra_contacts(hookenv.config('extra_contacts'))
-
- template_values = {'admin_service_notification_period': hookenv.config('admin_service_notification_period'),
- 'admin_host_notification_period': hookenv.config('admin_host_notification_period'),
- 'admin_service_notification_options': hookenv.config('admin_service_notification_options'),
- 'admin_host_notification_options': hookenv.config('admin_host_notification_options'),
- 'admin_service_notification_commands': hookenv.config('admin_service_notification_commands'),
- 'admin_host_notification_commands': hookenv.config('admin_host_notification_commands'),
- 'contacts': contacts,
- 'contactgroup_members': resulting_members,
- 'extra_contacts': extra_contacts}
-
- with open('hooks/templates/contacts-cfg.tmpl', 'r') as f:
+ extra_contacts = parse_extra_contacts(hookenv.config("extra_contacts"))
+
+ template_values = {
+ "admin_service_notification_period": hookenv.config(
+ "admin_service_notification_period"
+ ),
+ "admin_host_notification_period": hookenv.config(
+ "admin_host_notification_period"
+ ),
+ "admin_service_notification_options": hookenv.config(
+ "admin_service_notification_options"
+ ),
+ "admin_host_notification_options": hookenv.config(
+ "admin_host_notification_options"
+ ),
+ "admin_service_notification_commands": hookenv.config(
+ "admin_service_notification_commands"
+ ),
+ "admin_host_notification_commands": hookenv.config(
+ "admin_host_notification_commands"
+ ),
+ "contacts": contacts,
+ "contactgroup_members": resulting_members,
+ "extra_contacts": extra_contacts,
+ }
+
+ with open("hooks/templates/contacts-cfg.tmpl", "r") as f:
template_def = f.read()
t = Template(template_def)
- with open('/etc/nagios3/conf.d/contacts_nagios2.cfg', 'w') as f:
+ with open("/etc/nagios3/conf.d/contacts_nagios2.cfg", "w") as f:
f.write(t.render(template_values))
- host.service_reload('nagios3')
+ host.service_reload("nagios3")
def ssl_configured():
@@ -331,13 +344,13 @@ def ssl_configured():
# Gather local facts for SSL deployment
-deploy_key_path = os.path.join(charm_dir, 'data', '%s.key' % (cert_domain))
-deploy_cert_path = os.path.join(charm_dir, 'data', '%s.crt' % (cert_domain))
-deploy_csr_path = os.path.join(charm_dir, 'data', '%s.csr' % (cert_domain))
+deploy_key_path = os.path.join(charm_dir, "data", "%s.key" % (cert_domain))
+deploy_cert_path = os.path.join(charm_dir, "data", "%s.crt" % (cert_domain))
+deploy_csr_path = os.path.join(charm_dir, "data", "%s.csr" % (cert_domain))
# set basename for SSL key locations
-cert_file = '/etc/ssl/certs/%s.pem' % (cert_domain)
-key_file = '/etc/ssl/private/%s.key' % (cert_domain)
-chain_file = '/etc/ssl/certs/%s.csr' % (cert_domain)
+cert_file = "/etc/ssl/certs/%s.pem" % (cert_domain)
+key_file = "/etc/ssl/private/%s.key" % (cert_domain)
+chain_file = "/etc/ssl/certs/%s.csr" % (cert_domain)
# Check for key and certificate, since the CSR is optional
@@ -353,23 +366,23 @@ def check_ssl_files():
# Decode the SSL keys from their base64 encoded values in the configuration
def decode_ssl_keys():
- if hookenv.config('ssl_key'):
+ if hookenv.config("ssl_key"):
hookenv.log("Writing key from config ssl_key: %s" % key_file)
- with open(key_file, 'w') as f:
- f.write(str(base64.b64decode(hookenv.config('ssl_key'))))
- if hookenv.config('ssl_cert'):
- with open(cert_file, 'w') as f:
- f.write(str(base64.b64decode(hookenv.config('ssl_cert'))))
- if hookenv.config('ssl_chain'):
- with open(chain_file, 'w') as f:
- f.write(str(base64.b64decode(hookenv.config('ssl_cert'))))
+ with open(key_file, "w") as f:
+ f.write(str(base64.b64decode(hookenv.config("ssl_key"))))
+ if hookenv.config("ssl_cert"):
+ with open(cert_file, "w") as f:
+ f.write(str(base64.b64decode(hookenv.config("ssl_cert"))))
+ if hookenv.config("ssl_chain"):
+ with open(chain_file, "w") as f:
+ f.write(str(base64.b64decode(hookenv.config("ssl_chain"))))
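The ssl_key/ssl_cert/ssl_chain options are expected to arrive base64-encoded; a small round-trip sketch (note that on Python 3 b64decode returns bytes, so an explicit .decode() is safer than the str() wrapping used above, which would write the literal "b'...'" representation):

import base64

encoded = base64.b64encode(b"-----BEGIN CERTIFICATE-----\n")
decoded = base64.b64decode(encoded)
print(decoded.decode("ascii"))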
def enable_ssl():
# Set the basename of all ssl files
# Validate that we have configs, and generate a self signed certificate.
- if not hookenv.config('ssl_cert'):
+ if not hookenv.config("ssl_cert"):
# bail if keys already exist
if os.path.exists(cert_file):
hookenv.log("Keys exist, not creating keys!", "WARNING")
@@ -388,69 +401,69 @@ def nagios_bool(value):
def update_config():
- host_context = hookenv.config('nagios_host_context')
- local_host_name = 'nagios'
+ host_context = hookenv.config("nagios_host_context")
+ local_host_name = "nagios"
principal_unitname = hookenv.principal_unit()
# Fallback to using "primary" if it exists.
if principal_unitname:
local_host_name = principal_unitname
else:
- local_host_name = hookenv.local_unit().replace('/', '-')
- template_values = {'nagios_user': nagios_user,
- 'nagios_group': nagios_group,
- 'enable_livestatus': enable_livestatus,
- 'livestatus_path': livestatus_path,
- 'livestatus_args': hookenv.config('livestatus_args'),
- 'check_external_commands': hookenv.config('check_external_commands'),
- 'command_check_interval': hookenv.config('command_check_interval'),
- 'command_file': hookenv.config('command_file'),
- 'debug_file': hookenv.config('debug_file'),
- 'debug_verbosity': hookenv.config('debug_verbosity'),
- 'debug_level': hookenv.config('debug_level'),
- 'daemon_dumps_core': hookenv.config('daemon_dumps_core'),
- 'flap_detection': nagios_bool(hookenv.config('flap_detection')),
- 'admin_email': hookenv.config('admin_email'),
- 'admin_pager': hookenv.config('admin_pager'),
- 'log_rotation_method': hookenv.config('log_rotation_method'),
- 'log_archive_path': hookenv.config('log_archive_path'),
- 'use_syslog': hookenv.config('use_syslog'),
- 'monitor_self': hookenv.config('monitor_self'),
- 'nagios_hostname': "{}-{}".format(host_context, local_host_name),
- 'load_monitor': hookenv.config('load_monitor'),
- 'is_container': host.is_container(),
- 'service_check_timeout': hookenv.config('service_check_timeout'),
- 'service_check_timeout_state': hookenv.config('service_check_timeout_state'),
- }
-
- with open('hooks/templates/nagios-cfg.tmpl', 'r') as f:
+ local_host_name = hookenv.local_unit().replace("/", "-")
+ template_values = {
+ "nagios_user": nagios_user,
+ "nagios_group": nagios_group,
+ "enable_livestatus": enable_livestatus,
+ "livestatus_path": livestatus_path,
+ "livestatus_args": hookenv.config("livestatus_args"),
+ "check_external_commands": hookenv.config("check_external_commands"),
+ "command_check_interval": hookenv.config("command_check_interval"),
+ "command_file": hookenv.config("command_file"),
+ "debug_file": hookenv.config("debug_file"),
+ "debug_verbosity": hookenv.config("debug_verbosity"),
+ "debug_level": hookenv.config("debug_level"),
+ "daemon_dumps_core": hookenv.config("daemon_dumps_core"),
+ "flap_detection": nagios_bool(hookenv.config("flap_detection")),
+ "admin_email": hookenv.config("admin_email"),
+ "admin_pager": hookenv.config("admin_pager"),
+ "log_rotation_method": hookenv.config("log_rotation_method"),
+ "log_archive_path": hookenv.config("log_archive_path"),
+ "use_syslog": hookenv.config("use_syslog"),
+ "monitor_self": hookenv.config("monitor_self"),
+ "nagios_hostname": "{}-{}".format(host_context, local_host_name),
+ "load_monitor": hookenv.config("load_monitor"),
+ "is_container": host.is_container(),
+ "service_check_timeout": hookenv.config("service_check_timeout"),
+ "service_check_timeout_state": hookenv.config("service_check_timeout_state"),
+ }
+
+ with open("hooks/templates/nagios-cfg.tmpl", "r") as f:
templateDef = f.read()
t = Template(templateDef)
- with open(nagios_cfg, 'w') as f:
+ with open(nagios_cfg, "w") as f:
f.write(t.render(template_values))
- with open('hooks/templates/localhost_nagios2.cfg.tmpl', 'r') as f:
+ with open("hooks/templates/localhost_nagios2.cfg.tmpl", "r") as f:
templateDef = f.read()
t = Template(templateDef)
- with open('/etc/nagios3/conf.d/localhost_nagios2.cfg', 'w') as f:
+ with open("/etc/nagios3/conf.d/localhost_nagios2.cfg", "w") as f:
f.write(t.render(template_values))
- host.service_reload('nagios3')
+ host.service_reload("nagios3")
def update_cgi_config():
- template_values = {'nagiosadmin': nagiosadmin,
- 'ro_password': ro_password}
- with open('hooks/templates/nagios-cgi.tmpl', 'r') as f:
+ template_values = {"nagiosadmin": nagiosadmin, "ro_password": ro_password}
+ with open("hooks/templates/nagios-cgi.tmpl", "r") as f:
templateDef = f.read()
t = Template(templateDef)
- with open(nagios_cgi_cfg, 'w') as f:
+ with open(nagios_cgi_cfg, "w") as f:
f.write(t.render(template_values))
- host.service_reload('nagios3')
- host.service_reload('apache2')
+ host.service_reload("nagios3")
+ host.service_reload("apache2")
# Nagios3 is deployed as a global apache application from the archive.
@@ -467,33 +480,35 @@ def update_apache():
# Start by Setting the ports.conf
- with open('hooks/templates/ports-cfg.jinja2', 'r') as f:
+ with open("hooks/templates/ports-cfg.jinja2", "r") as f:
template_def = f.read()
t = Template(template_def)
- ports_conf = '/etc/apache2/ports.conf'
+ ports_conf = "/etc/apache2/ports.conf"
- with open(ports_conf, 'w') as f:
- f.write(t.render({'enable_http': HTTP_ENABLED}))
+ with open(ports_conf, "w") as f:
+ f.write(t.render({"enable_http": HTTP_ENABLED}))
# Next setup the default-ssl.conf
if os.path.exists(chain_file) and os.path.getsize(chain_file) > 0:
ssl_chain = chain_file
else:
ssl_chain = None
- template_values = {'ssl_key': key_file,
- 'ssl_cert': cert_file,
- 'ssl_chain': ssl_chain}
- with open('hooks/templates/default-ssl.tmpl', 'r') as f:
+ template_values = {
+ "ssl_key": key_file,
+ "ssl_cert": cert_file,
+ "ssl_chain": ssl_chain,
+ }
+ with open("hooks/templates/default-ssl.tmpl", "r") as f:
template_def = f.read()
t = Template(template_def)
- ssl_conf = '/etc/apache2/sites-available/default-ssl.conf'
- with open(ssl_conf, 'w') as f:
+ ssl_conf = "/etc/apache2/sites-available/default-ssl.conf"
+ with open(ssl_conf, "w") as f:
f.write(t.render(template_values))
# Create directory for extra *.include files installed by subordinates
try:
- os.makedirs('/etc/apache2/vhost.d/')
+ os.makedirs("/etc/apache2/vhost.d/")
except OSError:
pass
@@ -501,20 +516,20 @@ def update_apache():
sites = glob.glob("/etc/apache2/sites-available/*.conf")
non_ssl = set(sites) - {ssl_conf}
for each in non_ssl:
- site = os.path.basename(each).rsplit('.', 1)[0]
+ site = os.path.basename(each).rsplit(".", 1)[0]
Apache2Site(site).action(enabled=HTTP_ENABLED)
# Configure the behavior of https site
Apache2Site("default-ssl").action(enabled=SSL_CONFIGURED)
# Finally, restart apache2
- host.service_reload('apache2')
+ host.service_reload("apache2")
class Apache2Site:
def __init__(self, site):
self.site = site
- self.is_ssl = 'ssl' in site.lower()
+ self.is_ssl = "ssl" in site.lower()
self.port = 443 if self.is_ssl else 80
def action(self, enabled):
@@ -524,36 +539,40 @@ class Apache2Site:
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
- hookenv.log("Apache2Site: `{}`, returned {}, stdout:\n{}"
- .format(e.cmd, e.returncode, e.output), "ERROR")
+ hookenv.log(
+ "Apache2Site: `{}`, returned {}, stdout:\n{}".format(
+ e.cmd, e.returncode, e.output
+ ),
+ "ERROR",
+ )
def _enable(self):
hookenv.log("Apache2Site: Enabling %s..." % self.site, "INFO")
- self._call(['a2ensite', self.site])
+ self._call(["a2ensite", self.site])
if self.port == 443:
- self._call(['a2enmod', 'ssl'])
+ self._call(["a2enmod", "ssl"])
hookenv.open_port(self.port)
def _disable(self):
hookenv.log("Apache2Site: Disabling %s..." % self.site, "INFO")
- self._call(['a2dissite', self.site])
+ self._call(["a2dissite", self.site])
hookenv.close_port(self.port)
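The _call wrapper above is the usual check_output pattern with stderr folded into stdout; stripped of hookenv it reduces to this self-contained sketch:

import subprocess


def run_and_log(args):
    try:
        return subprocess.check_output(args, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # e.output carries the merged stdout/stderr of the failed command
        print("`{}` returned {}, output:\n{}".format(e.cmd, e.returncode, e.output))


run_and_log(["ls", "/nonexistent"])  # exercises the failure branch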
def update_password(account, password):
"""Update the charm and Apache's record of the password for the supplied account."""
- account_file = ''.join(['/var/lib/juju/nagios.', account, '.passwd'])
+ account_file = "".join(["/var/lib/juju/nagios.", account, ".passwd"])
if password:
- with open(account_file, 'w') as f:
+ with open(account_file, "w") as f:
f.write(password)
os.fchmod(f.fileno(), 0o0400)
- subprocess.call(['htpasswd', '-b', '/etc/nagios3/htpasswd.users',
- account, password])
+ subprocess.call(
+ ["htpasswd", "-b", "/etc/nagios3/htpasswd.users", account, password]
+ )
else:
""" password was empty, it has been removed. We should delete the account """
os.path.isfile(account_file) and os.remove(account_file)
- subprocess.call(['htpasswd', '-D', '/etc/nagios3/htpasswd.users',
- account])
+ subprocess.call(["htpasswd", "-D", "/etc/nagios3/htpasswd.users", account])
warn_legacy_relations()
@@ -571,12 +590,12 @@ update_apache()
update_localhost()
update_cgi_config()
update_contacts()
-update_password('nagiosro', ro_password)
+update_password("nagiosro", ro_password)
if password:
update_password(nagiosadmin, password)
-if nagiosadmin != 'nagiosadmin':
- update_password('nagiosadmin', False)
+if nagiosadmin != "nagiosadmin":
+ update_password("nagiosadmin", False)
-subprocess.call(['scripts/postfix_loopback_only.sh'])
-subprocess.call(['hooks/mymonitors-relation-joined'])
-subprocess.call(['hooks/monitors-relation-changed'])
+subprocess.call(["scripts/postfix_loopback_only.sh"])
+subprocess.call(["hooks/mymonitors-relation-joined"])
+subprocess.call(["hooks/monitors-relation-changed"])
diff --git a/hooks/website_relation_joined.py b/hooks/website_relation_joined.py
index 984ae80..6f2f992 100755
--- a/hooks/website_relation_joined.py
+++ b/hooks/website_relation_joined.py
@@ -17,23 +17,19 @@
import common
-from charmhelpers.core.hookenv import (
- config,
- log,
- relation_set,
-)
+from charmhelpers.core.hookenv import config, log, relation_set
def main():
- relation_data = {'hostname': common.get_local_ingress_address()}
- sslcfg = config()['ssl']
- if sslcfg == 'only':
- relation_data['port'] = 443
+ relation_data = {"hostname": common.get_local_ingress_address()}
+ sslcfg = config()["ssl"]
+ if sslcfg == "only":
+ relation_data["port"] = 443
else:
- relation_data['port'] = 80
- log('website-relation-joined data %s' % relation_data)
+ relation_data["port"] = 80
+ log("website-relation-joined data %s" % relation_data)
relation_set(None, **relation_data)
-if __name__ == '__main__': # pragma: no cover
+if __name__ == "__main__": # pragma: no cover
main()
diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py
index 484e3a4..23a9fb4 100644
--- a/tests/functional/conftest.py
+++ b/tests/functional/conftest.py
@@ -15,7 +15,7 @@ import pytest
STAT_FILE = "python3 -c \"import json; import os; s=os.stat('%s'); print(json.dumps({'uid': s.st_uid, 'gid': s.st_gid, 'mode': oct(s.st_mode), 'size': s.st_size}))\"" # noqa: E501
-@pytest.yield_fixture(scope='session')
+@pytest.yield_fixture(scope="session")
def event_loop(request):
"""Override the default pytest event loop to allow for broaded scopedv fixtures."""
loop = asyncio.get_event_loop_policy().new_event_loop()
@@ -26,7 +26,7 @@ def event_loop(request):
asyncio.set_event_loop(None)
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def controller():
"""Connect to the current controller."""
controller = Controller()
@@ -35,21 +35,21 @@ async def controller():
await controller.disconnect()
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def model(controller):
"""Create a model that lives only for the duration of the test."""
model_name = "functest-{}".format(uuid.uuid4())
model = await controller.add_model(model_name)
yield model
await model.disconnect()
- if os.getenv('PYTEST_KEEP_MODEL'):
+ if os.getenv("PYTEST_KEEP_MODEL"):
return
await controller.destroy_model(model_name)
while model_name in await controller.list_models():
await asyncio.sleep(1)
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def current_model():
"""Return the current model, does not create or destroy it."""
model = Model()
@@ -61,29 +61,34 @@ async def current_model():
@pytest.fixture
async def get_app(model):
"""Return the application requested."""
+
async def _get_app(name):
try:
return model.applications[name]
except KeyError:
raise JujuError("Cannot find application {}".format(name))
+
return _get_app
@pytest.fixture
async def get_unit(model):
"""Return the requested <app_name>/<unit_number> unit."""
+
async def _get_unit(name):
try:
- (app_name, unit_number) = name.split('/')
+ (app_name, unit_number) = name.split("/")
return model.applications[app_name].units[unit_number]
except (KeyError, ValueError):
raise JujuError("Cannot find unit {}".format(name))
+
return _get_unit
@pytest.fixture
async def get_entity(model, get_unit, get_app):
"""Return a unit or an application."""
+
async def _get_entity(name):
try:
return await get_unit(name)
@@ -92,12 +97,14 @@ async def get_entity(model, get_unit, get_app):
return await get_app(name)
except JujuError:
raise JujuError("Cannot find entity {}".format(name))
+
return _get_entity
@pytest.fixture
async def run_command(get_unit):
"""Run a command on a unit."""
+
async def _run_command(cmd, target):
"""
Run a command on a unit.
@@ -105,13 +112,10 @@ async def run_command(get_unit):
:param cmd: Command to be run
:param target: Unit object or unit name string
"""
- unit = (
- target
- if type(target) is juju.unit.Unit
- else await get_unit(target)
- )
+ unit = target if type(target) is juju.unit.Unit else await get_unit(target)
action = await unit.run(cmd)
return action.results
+
return _run_command
@@ -123,31 +127,36 @@ async def file_stat(run_command):
:param path: File path
:param target: Unit object or unit name string
"""
+
async def _file_stat(path, target):
cmd = STAT_FILE % path
results = await run_command(cmd, target)
- return json.loads(results['Stdout'])
+ return json.loads(results["Stdout"])
+
return _file_stat
@pytest.fixture
async def file_contents(run_command):
"""Return the contents of a file."""
+
async def _file_contents(path, target):
"""Return the contents of a file.
:param path: File path
:param target: Unit object or unit name string
"""
- cmd = 'cat {}'.format(path)
+ cmd = "cat {}".format(path)
results = await run_command(cmd, target)
- return results['Stdout']
+ return results["Stdout"]
+
return _file_contents
@pytest.fixture
async def reconfigure_app(get_app, model):
"""Apply a different config to the requested app."""
+
async def _reconfigure_app(cfg, target):
application = (
target
@@ -156,16 +165,19 @@ async def reconfigure_app(get_app, model):
)
await application.set_config(cfg)
await application.get_config()
- await model.block_until(lambda: application.status == 'active')
+ await model.block_until(lambda: application.status == "active")
+
return _reconfigure_app
@pytest.fixture
async def create_group(run_command):
"""Create the UNIX group specified."""
+
async def _create_group(group_name, target):
cmd = "sudo groupadd %s" % group_name
await run_command(cmd, target)
+
return _create_group
@@ -173,30 +185,24 @@ pytestmark = pytest.mark.asyncio
CHARM_BUILD_DIR = os.getenv("CHARM_BUILD_DIR", "..").rstrip("/")
-SERIES = [
- "trusty",
- "xenial",
- "bionic",
-]
+SERIES = ["trusty", "xenial", "bionic"]
############
# FIXTURES #
############
-@pytest.fixture(scope='session', params=SERIES)
+@pytest.fixture(scope="session", params=SERIES)
def series(request):
"""Return ubuntu version (i.e. xenial) in use in the test."""
return request.param
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def relatives(model, series):
nrpe = "nrpe"
nrpe_name = "nrpe-{}".format(series)
nrpe_app = await model.deploy(
- 'cs:' + nrpe, application_name=nrpe_name,
- series=series, config={},
- num_units=0
+ "cs:" + nrpe, application_name=nrpe_name, series=series, config={}, num_units=0
)
mysql = "mysql"
@@ -205,50 +211,52 @@ async def relatives(model, series):
mysql_name = "mysql-{}".format(series)
mysql_app = await model.deploy(
- 'cs:' + mysql, application_name=mysql_name,
- series=series, config={}
+ "cs:" + mysql, application_name=mysql_name, series=series, config={}
)
- await model.add_relation('{}:nrpe-external-master'.format(mysql_name),
- '{}:nrpe-external-master'.format(nrpe_name))
+ await model.add_relation(
+ "{}:nrpe-external-master".format(mysql_name),
+ "{}:nrpe-external-master".format(nrpe_name),
+ )
await model.block_until(
- lambda: mysql_app.units[0].workload_status == "active" and
- mysql_app.units[0].agent_status == "idle"
+ lambda: mysql_app.units[0].workload_status == "active"
+ and mysql_app.units[0].agent_status == "idle"
)
yield {
"mysql": {"name": mysql_name, "app": mysql_app},
- "nrpe": {"name": nrpe_name, "app": nrpe_app}
+ "nrpe": {"name": nrpe_name, "app": nrpe_app},
}
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
async def deploy_app(relatives, model, series):
"""Return application of the charm under test."""
app_name = "nagios-{}".format(series)
"""Deploy the nagios app."""
nagios_app = await model.deploy(
- os.path.join(CHARM_BUILD_DIR, 'nagios'),
+ os.path.join(CHARM_BUILD_DIR, "nagios"),
application_name=app_name,
series=series,
config={
- 'enable_livestatus': False,
- 'ssl': 'off',
- 'extraconfig': '',
- 'enable_pagerduty': False
- }
+ "enable_livestatus": False,
+ "ssl": "off",
+ "extraconfig": "",
+ "enable_pagerduty": False,
+ },
)
- await model.add_relation('{}:monitors'.format(relatives["nrpe"]["name"]),
- '{}:monitors'.format(app_name))
+ await model.add_relation(
+ "{}:monitors".format(relatives["nrpe"]["name"]), "{}:monitors".format(app_name)
+ )
await model.block_until(
- lambda: nagios_app.units[0].agent_status == "idle" and
- relatives["mysql"]["app"].units[0].agent_status == "idle"
+ lambda: nagios_app.units[0].agent_status == "idle"
+ and relatives["mysql"]["app"].units[0].agent_status == "idle"
)
yield nagios_app
- if os.getenv('PYTEST_KEEP_MODEL'):
+ if os.getenv("PYTEST_KEEP_MODEL"):
return
for relative in list(relatives.values()):
@@ -270,8 +278,9 @@ class Agent:
async def block_until_or_timeout(self, lambda_f, **kwargs):
await self.block_until(lambda_f, ignore_timeout=True, **kwargs)
- async def block_until(self, lambda_f, timeout=120, wait_period=5,
- ignore_timeout=False):
+ async def block_until(
+ self, lambda_f, timeout=120, wait_period=5, ignore_timeout=False
+ ):
try:
await self.model.block_until(
lambda_f, timeout=timeout, wait_period=wait_period
@@ -285,7 +294,7 @@ class Agent:
async def unit(model, deploy_app):
"""Return the unit we've deployed."""
unit = Agent(deploy_app.units[0], deploy_app)
- await unit.block_until(lambda: unit.is_active('idle'))
+ await unit.block_until(lambda: unit.is_active("idle"))
return unit
@@ -293,4 +302,4 @@ async def unit(model, deploy_app):
async def auth(file_contents, unit):
"""Return the basic auth credentials."""
nagiospwd = await file_contents("/var/lib/juju/nagios.passwd", unit.u)
- return 'nagiosadmin', nagiospwd.strip()
+ return "nagiosadmin", nagiospwd.strip()
diff --git a/tests/functional/test_config.py b/tests/functional/test_config.py
index b64f8a6..bee502c 100644
--- a/tests/functional/test_config.py
+++ b/tests/functional/test_config.py
@@ -1,25 +1,22 @@
from async_generator import asynccontextmanager
import pytest
import requests
+
pytestmark = pytest.mark.asyncio
@asynccontextmanager
async def config(unit, item, test_value, post_test):
await unit.application.set_config({item: test_value})
- await unit.block_until_or_timeout(
- lambda: unit.is_active('executing'), timeout=5,
- )
- await unit.block_until(lambda: unit.is_active('idle'))
+ await unit.block_until_or_timeout(lambda: unit.is_active("executing"), timeout=5)
+ await unit.block_until(lambda: unit.is_active("idle"))
yield test_value
await unit.application.set_config({item: post_test})
- await unit.block_until_or_timeout(
- lambda: unit.is_active('executing'), timeout=5,
- )
- await unit.block_until(lambda: unit.is_active('idle'))
+ await unit.block_until_or_timeout(lambda: unit.is_active("executing"), timeout=5)
+ await unit.block_until(lambda: unit.is_active("idle"))
-@pytest.fixture(params=['on', 'only'])
+@pytest.fixture(params=["on", "only"])
async def ssl(unit, request):
"""
Enable SSL before a test, then disable after test
@@ -27,7 +24,7 @@ async def ssl(unit, request):
:param Agent unit: unit from the fixture
:param request: test parameters
"""
- async with config(unit, 'ssl', request.param, 'off') as value:
+ async with config(unit, "ssl", request.param, "off") as value:
yield value
@@ -58,7 +55,7 @@ async def livestatus_path(unit):
"""
async with config(unit, "enable_livestatus", "true", "false"):
app_config = await unit.application.get_config()
- yield app_config['livestatus_path']['value']
+ yield app_config["livestatus_path"]["value"]
@pytest.fixture()
@@ -70,20 +67,24 @@ async def enable_pagerduty(unit):
"""
async with config(unit, "enable_pagerduty", "true", "false"):
app_config = await unit.application.get_config()
- yield app_config['pagerduty_path']['value']
+ yield app_config["pagerduty_path"]["value"]
+
@pytest.fixture
async def set_extra_contacts(unit):
"""Set extra contacts."""
name = "contact_name_1"
- extra_contacts = '''
+ extra_contacts = """
- name: {}
host: /custom/command/for/host $HOSTNAME$
service: /custom/command/for/service $SERVICENAME$
- '''.format(name)
+ """.format(
+ name
+ )
async with config(unit, "extra_contacts", extra_contacts, ""):
yield name
+
@pytest.fixture
async def set_multiple_admins(unit):
admins = "admin1@localhost,admin2@localhost"
@@ -96,7 +97,7 @@ async def set_multiple_admins(unit):
#########
async def test_web_interface_with_ssl(auth, unit, ssl):
http_url = "http://%s/nagios3/" % unit.u.public_address
- if ssl == 'only':
+ if ssl == "only":
with pytest.raises(requests.ConnectionError):
requests.get(http_url, auth=auth)
else:
@@ -108,57 +109,63 @@ async def test_web_interface_with_ssl(auth, unit, ssl):
assert r.status_code == 200, "HTTPs Admin login failed"
-@pytest.mark.usefixtures('extra_config')
+@pytest.mark.usefixtures("extra_config")
async def test_extra_config(auth, unit):
- host_url = "http://%s/cgi-bin/nagios3/status.cgi?" \
- "hostgroup=all&style=hostdetail" % unit.u.public_address
+ host_url = (
+ "http://%s/cgi-bin/nagios3/status.cgi?"
+ "hostgroup=all&style=hostdetail" % unit.u.public_address
+ )
r = requests.get(host_url, auth=auth)
- assert 'extra_config' in r.text, "Nagios is not monitoring extra_config"
+ assert "extra_config" in r.text, "Nagios is not monitoring extra_config"
async def test_live_status(unit, livestatus_path, file_stat):
stat = await file_stat(livestatus_path, unit.u)
- assert stat['size'] == 0, (
- "File %s didn't match expected size" % livestatus_path
- )
+ assert stat["size"] == 0, "File %s didn't match expected size" % livestatus_path
async def test_pager_duty(unit, enable_pagerduty, file_stat):
stat = await file_stat(enable_pagerduty, unit.u)
- assert stat['size'] != 0, (
- "Directory %s wasn't a non-zero size" % enable_pagerduty
- )
- stat = await file_stat('/etc/nagios3/conf.d/pagerduty_nagios.cfg', unit.u)
- assert stat['size'] != 0, "pagerduty_config wasn't a non-zero sized file"
+ assert stat["size"] != 0, "Directory %s wasn't a non-zero size" % enable_pagerduty
+ stat = await file_stat("/etc/nagios3/conf.d/pagerduty_nagios.cfg", unit.u)
+ assert stat["size"] != 0, "pagerduty_config wasn't a non-zero sized file"
async def test_extra_contacts(auth, unit, set_extra_contacts):
- contancts_url = "http://%s/cgi-bin/nagios3/config.cgi?" \
- "type=contacts" % unit.u.public_address
+ contacts_url = (
+ "http://%s/cgi-bin/nagios3/config.cgi?type=contacts" % unit.u.public_address
+ )
contact_name = set_extra_contacts
r = requests.get(contacts_url, auth=auth)
assert r.status_code == 200, "Get Nagios config request failed"
assert contact_name in r.text, "Nagios is not loading the extra contact."
- assert contact_name.capitalize() in r.text, "Contact name alias is not " \
- "the capitalized name."
- contactgroups_url = "http://%s/cgi-bin/nagios3/config.cgi" \
- "?type=contactgroups" % unit.u.public_address
+ assert contact_name.capitalize() in r.text, (
+ "Contact name alias is not the capitalized name."
+ )
+ contactgroups_url = (
+ "http://%s/cgi-bin/nagios3/config.cgi"
+ "?type=contactgroups" % unit.u.public_address
+ )
r = requests.get(contactgroups_url, auth=auth)
assert r.status_code == 200, "Get Nagios config request failed"
- assert contact_name in r.text, "Extra contact is not " \
- "added to the contact groups."
+ assert contact_name in r.text, "Extra contact is not added to the contact groups."
+
async def test_multiple_admin_contacts(auth, unit, set_multiple_admins):
- contancts_url = "http://%s/cgi-bin/nagios3/config.cgi?" \
- "type=contacts" % unit.u.public_address
+ contacts_url = (
+ "http://%s/cgi-bin/nagios3/config.cgi?type=contacts" % unit.u.public_address
+ )
admins = set_multiple_admins
r = requests.get(contacts_url, auth=auth)
assert r.status_code == 200, "Get Nagios config request failed"
- admins = admins.split(',')
+ admins = admins.split(",")
for admin in admins:
- admin = admin.replace('@', '').replace('.', '').lower()
+ admin = admin.replace("@", "").replace(".", "").lower()
admin_alias = admin.capitalize()
assert admin in r.text, "Nagios is not loading contact {}.".format(admin)
- assert admin_alias in r.text, "Nagios is not loading alias " \
- "for contact {}.".format(admin)
+ assert (
+ admin_alias in r.text
+ ), "Nagios is not loading alias for contact {}.".format(admin)
diff --git a/tests/functional/test_deploy.py b/tests/functional/test_deploy.py
index b8e9f40..e4131d6 100644
--- a/tests/functional/test_deploy.py
+++ b/tests/functional/test_deploy.py
@@ -1,5 +1,6 @@
import pytest
import requests
+
pytestmark = pytest.mark.asyncio
@@ -22,17 +23,15 @@ async def test_web_interface_is_protected(auth, unit):
async def test_hosts_being_monitored(auth, unit):
- host_url = ("http://%s/cgi-bin/nagios3/status.cgi?"
- "hostgroup=all&style=hostdetail") % unit.u.public_address
+ host_url = (
+ "http://%s/cgi-bin/nagios3/status.cgi?hostgroup=all&style=hostdetail"
+ ) % unit.u.public_address
r = requests.get(host_url, auth=auth)
- assert 'mysql' in r.text, "Nagios is not monitoring the hosts it supposed to."
+ assert "mysql" in r.text, "Nagios is not monitoring the hosts it supposed to."
async def test_nrpe_monitors_config(relatives, unit, file_contents):
# look for disk root check in nrpe config
- mysql_unit = relatives['mysql']['app'].units[0]
- contents = await file_contents(
- '/etc/nagios/nrpe.d/check_disk_root.cfg',
- mysql_unit
- )
+ mysql_unit = relatives["mysql"]["app"].units[0]
+ contents = await file_contents("/etc/nagios/nrpe.d/check_disk_root.cfg", mysql_unit)
assert contents, "disk root check config not found."
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index f6fceac..0c6a597 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -1,5 +1,5 @@
import os
import sys
-HOOKS = os.path.join(os.path.dirname(__file__), '..', '..', 'hooks')
+HOOKS = os.path.join(os.path.dirname(__file__), "..", "..", "hooks")
sys.path.append(HOOKS)
diff --git a/tests/unit/test_monitor_relation_changed.py b/tests/unit/test_monitor_relation_changed.py
index 3a7f5d6..f2f0e1d 100644
--- a/tests/unit/test_monitor_relation_changed.py
+++ b/tests/unit/test_monitor_relation_changed.py
@@ -4,4 +4,4 @@ import monitors_relation_changed
def test_has_main():
# THIS IS A REALLY LAME TEST -- but it's a start for where there was nothing
# if you add tests later, please do better than me
- assert hasattr(monitors_relation_changed, 'main')
+ assert hasattr(monitors_relation_changed, "main")
diff --git a/tests/unit/test_website_relation_joined.py b/tests/unit/test_website_relation_joined.py
index 3164428..b4ca91a 100644
--- a/tests/unit/test_website_relation_joined.py
+++ b/tests/unit/test_website_relation_joined.py
@@ -4,16 +4,16 @@ import pytest
import website_relation_joined
-@mock.patch('common.get_local_ingress_address')
-@mock.patch('website_relation_joined.config')
-@mock.patch('website_relation_joined.relation_set')
-@pytest.mark.parametrize('ssl', [
- ('only', 443),
- ('on', 80),
- ('off', 80)
-], ids=['ssl=only', 'ssl=on', 'ssl=off'])
+@mock.patch("common.get_local_ingress_address")
+@mock.patch("website_relation_joined.config")
+@mock.patch("website_relation_joined.relation_set")
+@pytest.mark.parametrize(
+ "ssl",
+ [("only", 443), ("on", 80), ("off", 80)],
+ ids=["ssl=only", "ssl=on", "ssl=off"],
+)
def test_main(relation_set, config, get_local_ingress_address, ssl):
- get_local_ingress_address.return_value = 'example.com'
- config.return_value = {'ssl': ssl[0]}
+ get_local_ingress_address.return_value = "example.com"
+ config.return_value = {"ssl": ssl[0]}
website_relation_joined.main()
- relation_set.assert_called_with(None, port=ssl[1], hostname='example.com')
+ relation_set.assert_called_with(None, port=ssl[1], hostname="example.com")
diff --git a/tox.ini b/tox.ini
index 34f9248..e200dee 100644
--- a/tox.ini
+++ b/tox.ini
@@ -36,8 +36,12 @@ deps = -r{toxinidir}/tests/functional/requirements.txt
-r{toxinidir}/requirements.txt
[testenv:lint]
-commands = flake8
-deps = flake8
+commands =
+ black --check --line-length 88 files hooks scripts tests
+ flake8
+deps =
+ flake8
+ black
[flake8]
exclude =
@@ -46,5 +50,5 @@ exclude =
.tox,
hooks/charmhelpers
bin
-max-line-length = 120
+max-line-length = 88
max-complexity = 10
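The new 88-column limit matches black's default, so flake8 and black agree on line length. When unsure how black will reflow a given construct, its Python API can be queried directly; a sketch, assuming a black version that exposes format_str and FileMode (FileMode defaults to line_length=88):

import black

src = "hookenv.log('Please use the generic juju-info or the monitors interface', 'WARNING')\n"
print(black.format_str(src, mode=black.FileMode()))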