nagios-charmers team mailing list archive
-
nagios-charmers team
-
Mailing list archive
-
Message #01008
[Merge] ~aieri/charm-nagios:bug/1864192 into charm-nagios:master
Andrea Ieri has proposed merging ~aieri/charm-nagios:bug/1864192 into charm-nagios:master.
Requested reviews:
Chris Sanders (chris.sanders)
Adam Dyess (addyess)
Peter Sabaini (peter-sabaini)
For more details, see:
https://code.launchpad.net/~aieri/charm-nagios/+git/nagios-charm/+merge/387302
--
Your team Nagios Charm developers is subscribed to branch charm-nagios:master.
diff --git a/Makefile b/Makefile
index dbbeab3..5ed72eb 100644
--- a/Makefile
+++ b/Makefile
@@ -35,8 +35,7 @@ test:
bin/charm_helpers_sync.py:
@mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
+ @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
sync: bin/charm_helpers_sync.py
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
diff --git a/bin/charm_helpers_sync.py b/bin/charm_helpers_sync.py
index bd79460..7c0c194 100644
--- a/bin/charm_helpers_sync.py
+++ b/bin/charm_helpers_sync.py
@@ -29,7 +29,7 @@ from fnmatch import fnmatch
import six
-CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
+CHARM_HELPERS_REPO = 'https://github.com/juju/charm-helpers'
def parse_config(conf_file):
@@ -39,10 +39,16 @@ def parse_config(conf_file):
return yaml.load(open(conf_file).read())
-def clone_helpers(work_dir, branch):
+def clone_helpers(work_dir, repo):
dest = os.path.join(work_dir, 'charm-helpers')
- logging.info('Checking out %s to %s.' % (branch, dest))
- cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
+ logging.info('Cloning out %s to %s.' % (repo, dest))
+ branch = None
+ if '@' in repo:
+ repo, branch = repo.split('@', 1)
+ cmd = ['git', 'clone', '--depth=1']
+ if branch is not None:
+ cmd += ['--branch', branch]
+ cmd += [repo, dest]
subprocess.check_call(cmd)
return dest
@@ -174,6 +180,9 @@ def extract_options(inc, global_options=None):
def sync_helpers(include, src, dest, options=None):
+ if os.path.exists(dest):
+ logging.debug('Removing existing directory: %s' % dest)
+ shutil.rmtree(dest)
if not os.path.isdir(dest):
os.makedirs(dest)
@@ -198,8 +207,8 @@ if __name__ == '__main__':
default=None, help='helper config file')
parser.add_option('-D', '--debug', action='store_true', dest='debug',
default=False, help='debug')
- parser.add_option('-b', '--branch', action='store', dest='branch',
- help='charm-helpers bzr branch (overrides config)')
+ parser.add_option('-r', '--repository', action='store', dest='repo',
+ help='charm-helpers git repository (overrides config)')
parser.add_option('-d', '--destination', action='store', dest='dest_dir',
help='sync destination dir (overrides config)')
(opts, args) = parser.parse_args()
@@ -218,10 +227,10 @@ if __name__ == '__main__':
else:
config = {}
- if 'branch' not in config:
- config['branch'] = CHARM_HELPERS_BRANCH
- if opts.branch:
- config['branch'] = opts.branch
+ if 'repo' not in config:
+ config['repo'] = CHARM_HELPERS_REPO
+ if opts.repo:
+ config['repo'] = opts.repo
if opts.dest_dir:
config['destination'] = opts.dest_dir
@@ -241,7 +250,7 @@ if __name__ == '__main__':
sync_options = config['options']
tmpd = tempfile.mkdtemp()
try:
- checkout = clone_helpers(tmpd, config['branch'])
+ checkout = clone_helpers(tmpd, config['repo'])
sync_helpers(config['include'], checkout, config['destination'],
options=sync_options)
except Exception as e:
diff --git a/charm-helpers.yaml b/charm-helpers.yaml
index e5f7760..640679e 100644
--- a/charm-helpers.yaml
+++ b/charm-helpers.yaml
@@ -1,7 +1,8 @@
+repo: https://github.com/juju/charm-helpers
destination: hooks/charmhelpers
-branch: lp:charm-helpers
include:
- core
- fetch
- osplatform
- contrib.ssl
+ - contrib.charmsupport
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
index e7aa471..61ef907 100644
--- a/hooks/charmhelpers/__init__.py
+++ b/hooks/charmhelpers/__init__.py
@@ -23,22 +23,22 @@ import subprocess
import sys
try:
- import six # flake8: noqa
+ import six # NOQA:F401
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
+ import six # NOQA:F401
try:
- import yaml # flake8: noqa
+ import yaml # NOQA:F401
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
+ import yaml # NOQA:F401
# Holds a list of mapping of mangled function names that have been deprecated
diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py
new file mode 100644
index 0000000..d7567b8
--- /dev/null
+++ b/hooks/charmhelpers/contrib/charmsupport/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
new file mode 100644
index 0000000..d775861
--- /dev/null
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -0,0 +1,500 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compatibility with the nrpe-external-master charm"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# Matthew Wedgwood <matthew.wedgwood@xxxxxxxxxxxxx>
+
+import subprocess
+import pwd
+import grp
+import os
+import glob
+import shutil
+import re
+import shlex
+import yaml
+
+from charmhelpers.core.hookenv import (
+ config,
+ hook_name,
+ local_unit,
+ log,
+ relation_get,
+ relation_ids,
+ relation_set,
+ relations_of_type,
+)
+
+from charmhelpers.core.host import service
+from charmhelpers.core import host
+
+# This module adds compatibility with the nrpe-external-master and plain nrpe
+# subordinate charms. To use it in your charm:
+#
+# 1. Update metadata.yaml
+#
+# provides:
+# (...)
+# nrpe-external-master:
+# interface: nrpe-external-master
+# scope: container
+#
+# and/or
+#
+# provides:
+# (...)
+# local-monitors:
+# interface: local-monitors
+# scope: container
+
+#
+# 2. Add the following to config.yaml
+#
+# nagios_context:
+# default: "juju"
+# type: string
+# description: |
+# Used by the nrpe subordinate charms.
+# A string that will be prepended to instance name to set the host name
+# in nagios. So for instance the hostname would be something like:
+# juju-myservice-0
+# If you're running multiple environments with the same services in them
+# this allows you to differentiate between them.
+# nagios_servicegroups:
+# default: ""
+# type: string
+# description: |
+# A comma-separated list of nagios servicegroups.
+# If left empty, the nagios_context will be used as the servicegroup
+#
+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
+#
+# 4. Update your hooks.py with something like this:
+#
+# from charmsupport.nrpe import NRPE
+# (...)
+# def update_nrpe_config():
+# nrpe_compat = NRPE()
+# nrpe_compat.add_check(
+# shortname = "myservice",
+# description = "Check MyService",
+# check_cmd = "check_http -w 2 -c 10 http://localhost"
+# )
+# nrpe_compat.add_check(
+# "myservice_other",
+# "Check for widget failures",
+# check_cmd = "/srv/myapp/scripts/widget_check"
+# )
+# nrpe_compat.write()
+#
+# def config_changed():
+# (...)
+# update_nrpe_config()
+#
+# def nrpe_external_master_relation_changed():
+# update_nrpe_config()
+#
+# def local_monitors_relation_changed():
+# update_nrpe_config()
+#
+# 4.a If your charm is a subordinate charm set primary=False
+#
+# from charmsupport.nrpe import NRPE
+# (...)
+# def update_nrpe_config():
+# nrpe_compat = NRPE(primary=False)
+#
+# 5. ln -s hooks.py nrpe-external-master-relation-changed
+# ln -s hooks.py local-monitors-relation-changed
+
+
+class CheckException(Exception):
+ pass
+
+
+class Check(object):
+ shortname_re = '[A-Za-z0-9-_.@]+$'
+ service_template = ("""
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {{
+ use active-service
+ host_name {nagios_hostname}
+ service_description {nagios_hostname}[{shortname}] """
+ """{description}
+ check_command check_nrpe!{command}
+ servicegroups {nagios_servicegroup}
+}}
+""")
+
+ def __init__(self, shortname, description, check_cmd):
+ super(Check, self).__init__()
+ # XXX: could be better to calculate this from the service name
+ if not re.match(self.shortname_re, shortname):
+ raise CheckException("shortname must match {}".format(
+ Check.shortname_re))
+ self.shortname = shortname
+ self.command = "check_{}".format(shortname)
+ # Note: a set of invalid characters is defined by the
+ # Nagios server config
+ # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
+ self.description = description
+ self.check_cmd = self._locate_cmd(check_cmd)
+
+ def _get_check_filename(self):
+ return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
+
+ def _get_service_filename(self, hostname):
+ return os.path.join(NRPE.nagios_exportdir,
+ 'service__{}_{}.cfg'.format(hostname, self.command))
+
+ def _locate_cmd(self, check_cmd):
+ search_path = (
+ '/usr/lib/nagios/plugins',
+ '/usr/local/lib/nagios/plugins',
+ )
+ parts = shlex.split(check_cmd)
+ for path in search_path:
+ if os.path.exists(os.path.join(path, parts[0])):
+ command = os.path.join(path, parts[0])
+ if len(parts) > 1:
+ command += " " + " ".join(parts[1:])
+ return command
+ log('Check command not found: {}'.format(parts[0]))
+ return ''
+
+ def _remove_service_files(self):
+ if not os.path.exists(NRPE.nagios_exportdir):
+ return
+ for f in os.listdir(NRPE.nagios_exportdir):
+ if f.endswith('_{}.cfg'.format(self.command)):
+ os.remove(os.path.join(NRPE.nagios_exportdir, f))
+
+ def remove(self, hostname):
+ nrpe_check_file = self._get_check_filename()
+ if os.path.exists(nrpe_check_file):
+ os.remove(nrpe_check_file)
+ self._remove_service_files()
+
+ def write(self, nagios_context, hostname, nagios_servicegroups):
+ nrpe_check_file = self._get_check_filename()
+ with open(nrpe_check_file, 'w') as nrpe_check_config:
+ nrpe_check_config.write("# check {}\n".format(self.shortname))
+ if nagios_servicegroups:
+ nrpe_check_config.write(
+ "# The following header was added automatically by juju\n")
+ nrpe_check_config.write(
+ "# Modifying it will affect nagios monitoring and alerting\n")
+ nrpe_check_config.write(
+ "# servicegroups: {}\n".format(nagios_servicegroups))
+ nrpe_check_config.write("command[{}]={}\n".format(
+ self.command, self.check_cmd))
+
+ if not os.path.exists(NRPE.nagios_exportdir):
+ log('Not writing service config as {} is not accessible'.format(
+ NRPE.nagios_exportdir))
+ else:
+ self.write_service_config(nagios_context, hostname,
+ nagios_servicegroups)
+
+ def write_service_config(self, nagios_context, hostname,
+ nagios_servicegroups):
+ self._remove_service_files()
+
+ templ_vars = {
+ 'nagios_hostname': hostname,
+ 'nagios_servicegroup': nagios_servicegroups,
+ 'description': self.description,
+ 'shortname': self.shortname,
+ 'command': self.command,
+ }
+ nrpe_service_text = Check.service_template.format(**templ_vars)
+ nrpe_service_file = self._get_service_filename(hostname)
+ with open(nrpe_service_file, 'w') as nrpe_service_config:
+ nrpe_service_config.write(str(nrpe_service_text))
+
+ def run(self):
+ subprocess.call(self.check_cmd)
+
+
+class NRPE(object):
+ nagios_logdir = '/var/log/nagios'
+ nagios_exportdir = '/var/lib/nagios/export'
+ nrpe_confdir = '/etc/nagios/nrpe.d'
+ homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
+
+ def __init__(self, hostname=None, primary=True):
+ super(NRPE, self).__init__()
+ self.config = config()
+ self.primary = primary
+ self.nagios_context = self.config['nagios_context']
+ if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
+ self.nagios_servicegroups = self.config['nagios_servicegroups']
+ else:
+ self.nagios_servicegroups = self.nagios_context
+ self.unit_name = local_unit().replace('/', '-')
+ if hostname:
+ self.hostname = hostname
+ else:
+ nagios_hostname = get_nagios_hostname()
+ if nagios_hostname:
+ self.hostname = nagios_hostname
+ else:
+ self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
+ self.checks = []
+ # Iff in an nrpe-external-master relation hook, set primary status
+ relation = relation_ids('nrpe-external-master')
+ if relation:
+ log("Setting charm primary status {}".format(primary))
+ for rid in relation:
+ relation_set(relation_id=rid, relation_settings={'primary': self.primary})
+ self.remove_check_queue = set()
+
+ def add_check(self, *args, **kwargs):
+ shortname = None
+ if kwargs.get('shortname') is None:
+ if len(args) > 0:
+ shortname = args[0]
+ else:
+ shortname = kwargs['shortname']
+
+ self.checks.append(Check(*args, **kwargs))
+ try:
+ self.remove_check_queue.remove(shortname)
+ except KeyError:
+ pass
+
+ def remove_check(self, *args, **kwargs):
+ if kwargs.get('shortname') is None:
+ raise ValueError('shortname of check must be specified')
+
+ # Use sensible defaults if they're not specified - these are not
+ # actually used during removal, but they're required for constructing
+ # the Check object; check_disk is chosen because it's part of the
+ # nagios-plugins-basic package.
+ if kwargs.get('check_cmd') is None:
+ kwargs['check_cmd'] = 'check_disk'
+ if kwargs.get('description') is None:
+ kwargs['description'] = ''
+
+ check = Check(*args, **kwargs)
+ check.remove(self.hostname)
+ self.remove_check_queue.add(kwargs['shortname'])
+
+ def write(self):
+ try:
+ nagios_uid = pwd.getpwnam('nagios').pw_uid
+ nagios_gid = grp.getgrnam('nagios').gr_gid
+ except Exception:
+ log("Nagios user not set up, nrpe checks not updated")
+ return
+
+ if not os.path.exists(NRPE.nagios_logdir):
+ os.mkdir(NRPE.nagios_logdir)
+ os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
+
+ nrpe_monitors = {}
+ monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
+ for nrpecheck in self.checks:
+ nrpecheck.write(self.nagios_context, self.hostname,
+ self.nagios_servicegroups)
+ nrpe_monitors[nrpecheck.shortname] = {
+ "command": nrpecheck.command,
+ }
+
+ # update-status hooks are configured to firing every 5 minutes by
+ # default. When nagios-nrpe-server is restarted, the nagios server
+ # reports checks failing causing unnecessary alerts. Let's not restart
+ # on update-status hooks.
+ if not hook_name() == 'update-status':
+ service('restart', 'nagios-nrpe-server')
+
+ monitor_ids = relation_ids("local-monitors") + \
+ relation_ids("nrpe-external-master")
+ for rid in monitor_ids:
+ reldata = relation_get(unit=local_unit(), rid=rid)
+ if 'monitors' in reldata:
+ # update the existing set of monitors with the new data
+ old_monitors = yaml.safe_load(reldata['monitors'])
+ old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
+ # remove keys that are in the remove_check_queue
+ old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
+ if k not in self.remove_check_queue}
+ # update/add nrpe_monitors
+ old_nrpe_monitors.update(nrpe_monitors)
+ old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
+ # write back to the relation
+ relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
+ else:
+ # write a brand new set of monitors, as no existing ones.
+ relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+ self.remove_check_queue.clear()
+
+
+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+ """
+ Query relation with nrpe subordinate, return the nagios_host_context
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ for rel in relations_of_type(relation_name):
+ if 'nagios_host_context' in rel:
+ return rel['nagios_host_context']
+
+
+def get_nagios_hostname(relation_name='nrpe-external-master'):
+ """
+ Query relation with nrpe subordinate, return the nagios_hostname
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ for rel in relations_of_type(relation_name):
+ if 'nagios_hostname' in rel:
+ return rel['nagios_hostname']
+
+
+def get_nagios_unit_name(relation_name='nrpe-external-master'):
+ """
+ Return the nagios unit name prepended with host_context if needed
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ host_context = get_nagios_hostcontext(relation_name)
+ if host_context:
+ unit = "%s:%s" % (host_context, local_unit())
+ else:
+ unit = local_unit()
+ return unit
+
+
+def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
+ """
+ Add checks for each service in list
+
+ :param NRPE nrpe: NRPE object to add check to
+ :param list services: List of services to check
+ :param str unit_name: Unit name to use in check description
+ :param bool immediate_check: For sysv init, run the service check immediately
+ """
+ for svc in services:
+ # Don't add a check for these services from neutron-gateway
+ if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+ next
+
+ upstart_init = '/etc/init/%s.conf' % svc
+ sysv_init = '/etc/init.d/%s' % svc
+
+ if host.init_is_systemd():
+ nrpe.add_check(
+ shortname=svc,
+ description='process check {%s}' % unit_name,
+ check_cmd='check_systemd.py %s' % svc
+ )
+ elif os.path.exists(upstart_init):
+ nrpe.add_check(
+ shortname=svc,
+ description='process check {%s}' % unit_name,
+ check_cmd='check_upstart_job %s' % svc
+ )
+ elif os.path.exists(sysv_init):
+ cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
+ checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
+ croncmd = (
+ '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+ '-e -s /etc/init.d/%s status' % svc
+ )
+ cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
+ f = open(cronpath, 'w')
+ f.write(cron_file)
+ f.close()
+ nrpe.add_check(
+ shortname=svc,
+ description='service check {%s}' % unit_name,
+ check_cmd='check_status_file.py -f %s' % checkpath,
+ )
+ # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
+ # (LP: #1670223).
+ if immediate_check and os.path.isdir(nrpe.homedir):
+ f = open(checkpath, 'w')
+ subprocess.call(
+ croncmd.split(),
+ stdout=f,
+ stderr=subprocess.STDOUT
+ )
+ f.close()
+ os.chmod(checkpath, 0o644)
+
+
+def copy_nrpe_checks(nrpe_files_dir=None):
+ """
+ Copy the nrpe checks into place
+
+ """
+ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
+ if nrpe_files_dir is None:
+ # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+ for segment in ['.', 'hooks']:
+ nrpe_files_dir = os.path.abspath(os.path.join(
+ os.getenv('CHARM_DIR'),
+ segment,
+ 'charmhelpers',
+ 'contrib',
+ 'openstack',
+ 'files'))
+ if os.path.isdir(nrpe_files_dir):
+ break
+ else:
+ raise RuntimeError("Couldn't find charmhelpers directory")
+ if not os.path.exists(NAGIOS_PLUGINS):
+ os.makedirs(NAGIOS_PLUGINS)
+ for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
+ if os.path.isfile(fname):
+ shutil.copy2(fname,
+ os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
+
+
+def add_haproxy_checks(nrpe, unit_name):
+ """
+ Add checks for each service in list
+
+ :param NRPE nrpe: NRPE object to add check to
+ :param str unit_name: Unit name to use in check description
+ """
+ nrpe.add_check(
+ shortname='haproxy_servers',
+ description='Check HAProxy {%s}' % unit_name,
+ check_cmd='check_haproxy.sh')
+ nrpe.add_check(
+ shortname='haproxy_queue',
+ description='Check HAProxy queue depth {%s}' % unit_name,
+ check_cmd='check_haproxy_queue_depth.sh')
+
+
+def remove_deprecated_check(nrpe, deprecated_services):
+ """
+    Remove checks for deprecated services in list
+
+ :param nrpe: NRPE object to remove check from
+ :type nrpe: NRPE
+ :param deprecated_services: List of deprecated services that are removed
+ :type deprecated_services: list
+ """
+ for dep_svc in deprecated_services:
+ log('Deprecated service: {}'.format(dep_svc))
+ nrpe.remove_check(shortname=dep_svc)
diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py
new file mode 100644
index 0000000..7ea43f0
--- /dev/null
+++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py
@@ -0,0 +1,173 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Functions for managing volumes in juju units. One volume is supported per unit.
+Subordinates may have their own storage, provided it is on its own partition.
+
+Configuration stanzas::
+
+ volume-ephemeral:
+ type: boolean
+ default: true
+ description: >
+        If false, a volume is mounted as specified in "volume-map"
+ If true, ephemeral storage will be used, meaning that log data
+ will only exist as long as the machine. YOU HAVE BEEN WARNED.
+ volume-map:
+ type: string
+ default: {}
+ description: >
+ YAML map of units to device names, e.g:
+ "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
+ Service units will raise a configure-error if volume-ephemeral
+ is 'true' and no volume-map value is set. Use 'juju set' to set a
+ value and 'juju resolved' to complete configuration.
+
+Usage::
+
+ from charmsupport.volumes import configure_volume, VolumeConfigurationError
+ from charmsupport.hookenv import log, ERROR
+ def post_mount_hook():
+ stop_service('myservice')
+ def post_mount_hook():
+ start_service('myservice')
+
+ if __name__ == '__main__':
+ try:
+ configure_volume(before_change=pre_mount_hook,
+ after_change=post_mount_hook)
+ except VolumeConfigurationError:
+ log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
+class VolumeConfigurationError(Exception):
+ '''Volume configuration data is missing or invalid'''
+ pass
+
+
+def get_config():
+ '''Gather and sanity-check volume configuration data'''
+ volume_config = {}
+ config = hookenv.config()
+
+ errors = False
+
+ if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
+ volume_config['ephemeral'] = True
+ else:
+ volume_config['ephemeral'] = False
+
+ try:
+ volume_map = yaml.safe_load(config.get('volume-map', '{}'))
+ except yaml.YAMLError as e:
+ hookenv.log("Error parsing YAML volume-map: {}".format(e),
+ hookenv.ERROR)
+ errors = True
+ if volume_map is None:
+ # probably an empty string
+ volume_map = {}
+ elif not isinstance(volume_map, dict):
+ hookenv.log("Volume-map should be a dictionary, not {}".format(
+ type(volume_map)))
+ errors = True
+
+ volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
+ if volume_config['device'] and volume_config['ephemeral']:
+ # asked for ephemeral storage but also defined a volume ID
+ hookenv.log('A volume is defined for this unit, but ephemeral '
+ 'storage was requested', hookenv.ERROR)
+ errors = True
+ elif not volume_config['device'] and not volume_config['ephemeral']:
+ # asked for permanent storage but did not define volume ID
+ hookenv.log('Ephemeral storage was requested, but there is no volume '
+ 'defined for this unit.', hookenv.ERROR)
+ errors = True
+
+ unit_mount_name = hookenv.local_unit().replace('/', '-')
+ volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
+
+ if errors:
+ return None
+ return volume_config
+
+
+def mount_volume(config):
+ if os.path.exists(config['mountpoint']):
+ if not os.path.isdir(config['mountpoint']):
+ hookenv.log('Not a directory: {}'.format(config['mountpoint']))
+ raise VolumeConfigurationError()
+ else:
+ host.mkdir(config['mountpoint'])
+ if os.path.ismount(config['mountpoint']):
+ unmount_volume(config)
+ if not host.mount(config['device'], config['mountpoint'], persist=True):
+ raise VolumeConfigurationError()
+
+
+def unmount_volume(config):
+ if os.path.ismount(config['mountpoint']):
+ if not host.umount(config['mountpoint'], persist=True):
+ raise VolumeConfigurationError()
+
+
+def managed_mounts():
+ '''List of all mounted managed volumes'''
+ return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
+
+
+def configure_volume(before_change=lambda: None, after_change=lambda: None):
+ '''Set up storage (or don't) according to the charm's volume configuration.
+ Returns the mount point or "ephemeral". before_change and after_change
+ are optional functions to be called if the volume configuration changes.
+ '''
+
+ config = get_config()
+ if not config:
+ hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
+ raise VolumeConfigurationError()
+
+ if config['ephemeral']:
+ if os.path.ismount(config['mountpoint']):
+ before_change()
+ unmount_volume(config)
+ after_change()
+ return 'ephemeral'
+ else:
+ # persistent storage
+ if os.path.ismount(config['mountpoint']):
+ mounts = dict(managed_mounts())
+ if mounts.get(config['mountpoint']) != config['device']:
+ before_change()
+ unmount_volume(config)
+ mount_volume(config)
+ after_change()
+ else:
+ before_change()
+ mount_volume(config)
+ after_change()
+ return config['mountpoint']
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 67ad691..d7c37c1 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -21,23 +21,29 @@
from __future__ import print_function
import copy
from distutils.version import LooseVersion
+from enum import Enum
from functools import wraps
+from collections import namedtuple
import glob
import os
import json
import yaml
+import re
import subprocess
import sys
import errno
import tempfile
from subprocess import CalledProcessError
+from charmhelpers import deprecate
+
import six
if not six.PY3:
from UserDict import UserDict
else:
from collections import UserDict
+
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
@@ -45,6 +51,20 @@ INFO = "INFO"
DEBUG = "DEBUG"
TRACE = "TRACE"
MARKER = object()
+SH_MAX_ARG = 131071
+
+
+RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
+ 'This may not be compatible with software you are '
+ 'running in your shell.')
+
+
+class WORKLOAD_STATES(Enum):
+ ACTIVE = 'active'
+ BLOCKED = 'blocked'
+ MAINTENANCE = 'maintenance'
+ WAITING = 'waiting'
+
cache = {}
@@ -65,7 +85,7 @@ def cached(func):
@wraps(func)
def wrapper(*args, **kwargs):
global cache
- key = str((func, args, kwargs))
+ key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
try:
return cache[key]
except KeyError:
@@ -95,7 +115,7 @@ def log(message, level=None):
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
- command += [message]
+ command += [message[:SH_MAX_ARG]]
# Missing juju-log should not cause failures in unit tests
# Send log output to stderr
try:
@@ -110,6 +130,24 @@ def log(message, level=None):
raise
+def function_log(message):
+ """Write a function progress message"""
+ command = ['function-log']
+ if not isinstance(message, six.string_types):
+ message = repr(message)
+ command += [message[:SH_MAX_ARG]]
+ # Missing function-log should not cause failures in unit tests
+ # Send function_log output to stderr
+ try:
+ subprocess.call(command)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ message = "function-log: {}".format(message)
+ print(message, file=sys.stderr)
+ else:
+ raise
+
+
class Serializable(UserDict):
"""Wrapper, an object that can be serialized to yaml or json"""
@@ -198,11 +236,35 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
-def service_name():
- """The name service group this unit belongs to"""
+def application_name():
+ """
+ The name of the deployed application this unit belongs to.
+ """
return local_unit().split('/')[0]
+def service_name():
+ """
+ .. deprecated:: 0.19.1
+ Alias for :func:`application_name`.
+ """
+ return application_name()
+
+
+def model_name():
+ """
+ Name of the model that this unit is deployed in.
+ """
+ return os.environ['JUJU_MODEL_NAME']
+
+
+def model_uuid():
+ """
+ UUID of the model that this unit is deployed in.
+ """
+ return os.environ['JUJU_MODEL_UUID']
+
+
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
@@ -287,7 +349,7 @@ class Config(dict):
self.implicit_save = True
self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
+ if os.path.exists(self.path) and os.stat(self.path).st_size:
self.load_previous()
atexit(self._implicit_save)
@@ -307,7 +369,11 @@ class Config(dict):
"""
self.path = path or self.path
with open(self.path) as f:
- self._prev_dict = json.load(f)
+ try:
+ self._prev_dict = json.load(f)
+ except ValueError as e:
+ log('Unable to parse previous config data - {}'.format(str(e)),
+ level=ERROR)
for k, v in copy.deepcopy(self._prev_dict).items():
if k not in self:
self[k] = v
@@ -343,6 +409,7 @@ class Config(dict):
"""
with open(self.path, 'w') as f:
+ os.fchmod(f.fileno(), 0o600)
json.dump(self, f)
def _implicit_save(self):
@@ -350,22 +417,40 @@ class Config(dict):
self.save()
-@cached
+_cache_config = None
+
+
def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- else:
- config_cmd_line.append('--all')
- config_cmd_line.append('--format=json')
+ """
+ Get the juju charm configuration (scope==None) or individual key,
+ (scope=str). The returned value is a Python data structure loaded as
+ JSON from the Juju config command.
+
+ :param scope: If set, return the value for the specified key.
+ :type scope: Optional[str]
+ :returns: Either the whole config as a Config, or a key from it.
+ :rtype: Any
+ """
+ global _cache_config
+ config_cmd_line = ['config-get', '--all', '--format=json']
+ try:
+ # JSON Decode Exception for Python3.5+
+ exc_json = json.decoder.JSONDecodeError
+ except AttributeError:
+ # JSON Decode Exception for Python2.7 through Python3.4
+ exc_json = ValueError
try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
+ if _cache_config is None:
+ config_data = json.loads(
+ subprocess.check_output(config_cmd_line).decode('UTF-8'))
+ _cache_config = Config(config_data)
if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
+ return _cache_config.get(scope)
+ return _cache_config
+ except (exc_json, UnicodeDecodeError) as e:
+ log('Unable to parse output from config-get: config_cmd_line="{}" '
+ 'message="{}"'
+ .format(config_cmd_line, str(e)), level=ERROR)
return None
@@ -459,6 +544,67 @@ def related_units(relid=None):
subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+def expected_peer_units():
+ """Get a generator for units we expect to join peer relation based on
+ goal-state.
+
+ The local unit is excluded from the result to make it easy to gauge
+ completion of all peers joining the relation with existing hook tools.
+
+ Example usage:
+ log('peer {} of {} joined peer relation'
+ .format(len(related_units()),
+ len(list(expected_peer_units()))))
+
+ This function will raise NotImplementedError if used with juju versions
+ without goal-state support.
+
+ :returns: iterator
+ :rtype: types.GeneratorType
+ :raises: NotImplementedError
+ """
+ if not has_juju_version("2.4.0"):
+ # goal-state first appeared in 2.4.0.
+ raise NotImplementedError("goal-state")
+ _goal_state = goal_state()
+ return (key for key in _goal_state['units']
+ if '/' in key and key != local_unit())
+
+
+def expected_related_units(reltype=None):
+ """Get a generator for units we expect to join relation based on
+ goal-state.
+
+ Note that you can not use this function for the peer relation, take a look
+ at expected_peer_units() for that.
+
+ This function will raise KeyError if you request information for a
+ relation type for which juju goal-state does not have information. It will
+ raise NotImplementedError if used with juju versions without goal-state
+ support.
+
+ Example usage:
+ log('participant {} of {} joined relation {}'
+ .format(len(related_units()),
+ len(list(expected_related_units())),
+ relation_type()))
+
+ :param reltype: Relation type to list data for, default is to list data for
+ the relation type we are currently executing a hook for.
+ :type reltype: str
+ :returns: iterator
+ :rtype: types.GeneratorType
+ :raises: KeyError, NotImplementedError
+ """
+ if not has_juju_version("2.4.4"):
+ # goal-state existed in 2.4.0, but did not list individual units to
+ # join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
+ raise NotImplementedError("goal-state relation unit count")
+ reltype = reltype or relation_type()
+ _goal_state = goal_state()
+ return (key for key in _goal_state['relations'][reltype] if '/' in key)
+
+
@cached
def relation_for_unit(unit=None, rid=None):
"""Get the json represenation of a unit's relation"""
@@ -644,18 +790,31 @@ def is_relation_made(relation, keys='private-address'):
return False
+def _port_op(op_name, port, protocol="TCP"):
+ """Open or close a service network port"""
+ _args = [op_name]
+ icmp = protocol.upper() == "ICMP"
+ if icmp:
+ _args.append(protocol)
+ else:
+ _args.append('{}/{}'.format(port, protocol))
+ try:
+ subprocess.check_call(_args)
+ except subprocess.CalledProcessError:
+ # Older Juju pre 2.3 doesn't support ICMP
+ # so treat it as a no-op if it fails.
+ if not icmp:
+ raise
+
+
def open_port(port, protocol="TCP"):
"""Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
+ _port_op('open-port', port, protocol)
def close_port(port, protocol="TCP"):
"""Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
+ _port_op('close-port', port, protocol)
def open_ports(start, end, protocol="TCP"):
@@ -672,6 +831,17 @@ def close_ports(start, end, protocol="TCP"):
subprocess.check_call(_args)
+def opened_ports():
+ """Get the opened ports
+
+ *Note that this will only show ports opened in a previous hook*
+
+ :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
+ """
+ _args = ['opened-ports', '--format=json']
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+
+
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
@@ -793,6 +963,10 @@ class Hooks(object):
return wrapper
+class NoNetworkBinding(Exception):
+ pass
+
+
def charm_dir():
"""Return the root directory of the current charm"""
d = os.environ.get('JUJU_CHARM_DIR')
@@ -801,9 +975,23 @@ def charm_dir():
return os.environ.get('CHARM_DIR')
+def cmd_exists(cmd):
+ """Return True if the specified cmd exists in the path"""
+ return any(
+ os.access(os.path.join(path, cmd), os.X_OK)
+ for path in os.environ["PATH"].split(os.pathsep)
+ )
+
+
@cached
+@deprecate("moved to function_get()", log=log)
def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
+ """
+ .. deprecated:: 0.20.7
+ Alias for :func:`function_get`.
+
+ Gets the value of an action parameter, or all key/value param pairs.
+ """
cmd = ['action-get']
if key is not None:
cmd.append(key)
@@ -812,52 +1000,130 @@ def action_get(key=None):
return action_data
+@cached
+def function_get(key=None):
+ """Gets the value of an action parameter, or all key/value param pairs"""
+ cmd = ['function-get']
+ # Fallback for older charms.
+ if not cmd_exists('function-get'):
+ cmd = ['action-get']
+
+ if key is not None:
+ cmd.append(key)
+ cmd.append('--format=json')
+ function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ return function_data
+
+
+@deprecate("moved to function_set()", log=log)
def action_set(values):
- """Sets the values to be returned after the action finishes"""
+ """
+ .. deprecated:: 0.20.7
+ Alias for :func:`function_set`.
+
+ Sets the values to be returned after the action finishes.
+ """
cmd = ['action-set']
for k, v in list(values.items()):
cmd.append('{}={}'.format(k, v))
subprocess.check_call(cmd)
+def function_set(values):
+ """Sets the values to be returned after the function finishes"""
+ cmd = ['function-set']
+ # Fallback for older charms.
+ if not cmd_exists('function-get'):
+ cmd = ['action-set']
+
+ for k, v in list(values.items()):
+ cmd.append('{}={}'.format(k, v))
+ subprocess.check_call(cmd)
+
+
+@deprecate("moved to function_fail()", log=log)
def action_fail(message):
- """Sets the action status to failed and sets the error message.
+ """
+ .. deprecated:: 0.20.7
+ Alias for :func:`function_fail`.
+
+ Sets the action status to failed and sets the error message.
- The results set by action_set are preserved."""
+ The results set by action_set are preserved.
+ """
subprocess.check_call(['action-fail', message])
+def function_fail(message):
+ """Sets the function status to failed and sets the error message.
+
+ The results set by function_set are preserved."""
+ cmd = ['function-fail']
+ # Fallback for older charms.
+ if not cmd_exists('function-fail'):
+ cmd = ['action-fail']
+ cmd.append(message)
+
+ subprocess.check_call(cmd)
+
+
def action_name():
"""Get the name of the currently executing action."""
return os.environ.get('JUJU_ACTION_NAME')
+def function_name():
+ """Get the name of the currently executing function."""
+ return os.environ.get('JUJU_FUNCTION_NAME') or action_name()
+
+
def action_uuid():
"""Get the UUID of the currently executing action."""
return os.environ.get('JUJU_ACTION_UUID')
+def function_id():
+ """Get the ID of the currently executing function."""
+ return os.environ.get('JUJU_FUNCTION_ID') or action_uuid()
+
+
def action_tag():
"""Get the tag for the currently executing action."""
return os.environ.get('JUJU_ACTION_TAG')
-def status_set(workload_state, message):
+def function_tag():
+ """Get the tag for the currently executing function."""
+ return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
+
+
+def status_set(workload_state, message, application=False):
"""Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
+ assume this is juju < 1.23 and juju-log the message instead.
- workload_state -- valid juju workload state.
- message -- status update message
+ workload_state -- valid juju workload state. str or WORKLOAD_STATES
+ message -- status update message
+ application -- Whether this is an application state set
"""
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
+ bad_state_msg = '{!r} is not a valid workload state'
+
+ if isinstance(workload_state, str):
+ try:
+ # Convert string to enum.
+ workload_state = WORKLOAD_STATES[workload_state.upper()]
+ except KeyError:
+ raise ValueError(bad_state_msg.format(workload_state))
+
+ if workload_state not in WORKLOAD_STATES:
+ raise ValueError(bad_state_msg.format(workload_state))
+
+ cmd = ['status-set']
+ if application:
+ cmd.append('--application')
+ cmd.extend([workload_state.value, message])
try:
ret = subprocess.call(cmd)
if ret == 0:
@@ -865,7 +1131,7 @@ def status_set(workload_state, message):
except OSError as e:
if e.errno != errno.ENOENT:
raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
+ log_message = 'status-set failed: {} {}'.format(workload_state.value,
message)
log(log_message, level='INFO')
@@ -919,6 +1185,14 @@ def application_version_set(version):
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+@cached
+def goal_state():
+ """Juju goal state values"""
+ cmd = ['goal-state', '--format=json']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
"""Does the current unit hold the juju leadership
@@ -1012,7 +1286,6 @@ def juju_version():
universal_newlines=True).strip()
-@cached
def has_juju_version(minimum_version):
"""Return True if the Juju version is at least the provided version"""
return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1072,6 +1345,8 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
'''
+ Deprecated since Juju 2.3; use network_get()
+
Retrieve the primary network address for a named binding
:param binding: string. The name of a relation of extra-binding
@@ -1079,10 +1354,19 @@ def network_get_primary_address(binding):
:raise: NotImplementedError if run on Juju < 2.0
'''
cmd = ['network-get', '--primary-address', binding]
- return subprocess.check_output(cmd).decode('UTF-8').strip()
+ try:
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ except CalledProcessError as e:
+ if 'no network config found for binding' in e.output.decode('UTF-8'):
+ raise NoNetworkBinding("No network binding for {}"
+ .format(binding))
+ else:
+ raise
+ return response
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get(endpoint, relation_id=None):
"""
Retrieve the network details for a relation endpoint
@@ -1090,24 +1374,20 @@ def network_get(endpoint, relation_id=None):
:param endpoint: string. The name of a relation endpoint
:param relation_id: int. The ID of the relation for the current context.
:return: dict. The loaded YAML output of the network-get query.
- :raise: NotImplementedError if run on Juju < 2.1
+ :raise: NotImplementedError if request not supported by the Juju version.
"""
+ if not has_juju_version('2.2'):
+ raise NotImplementedError(juju_version()) # earlier versions require --primary-address
+ if relation_id and not has_juju_version('2.3'):
+ raise NotImplementedError # 2.3 added the -r option
+
cmd = ['network-get', endpoint, '--format', 'yaml']
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
- try:
- response = subprocess.check_output(
- cmd,
- stderr=subprocess.STDOUT).decode('UTF-8').strip()
- except CalledProcessError as e:
- # Early versions of Juju 2.0.x required the --primary-address argument.
- # We catch that condition here and raise NotImplementedError since
- # the requested semantics are not available - the caller can then
- # use the network_get_primary_address() method instead.
- if '--primary-address is currently required' in e.output.decode('UTF-8'):
- raise NotImplementedError
- raise
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
return yaml.safe_load(response)
@@ -1140,3 +1420,192 @@ def meter_info():
"""Get the meter status information, if running in the meter-status-changed
hook."""
return os.environ.get('JUJU_METER_INFO')
+
+
+def iter_units_for_relation_name(relation_name):
+ """Iterate through all units in a relation
+
+ Generator that iterates through all the units in a relation and yields
+ a named tuple with rid and unit field names.
+
+ Usage:
+ data = [(u.rid, u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param relation_name: string relation name
+ :yield: Named Tuple with rid and unit field names
+ """
+ RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+ for rid in relation_ids(relation_name):
+ for unit in related_units(rid):
+ yield RelatedUnit(rid, unit)
+
+
+def ingress_address(rid=None, unit=None):
+ """
+ Retrieve the ingress-address from a relation when available.
+ Otherwise, return the private-address.
+
+ When used on the consuming side of the relation (unit is a remote
+ unit), the ingress-address is the IP address that this unit needs
+ to use to reach the provided service on the remote unit.
+
+ When used on the providing side of the relation (unit == local_unit()),
+ the ingress-address is the IP address that is advertised to remote
+ units on this relation. Remote units need to use this address to
+ reach the local provided service on this unit.
+
+ Note that charms may document some other method to use in
+ preference to the ingress_address(), such as an address provided
+ on a different relation attribute or a service discovery mechanism.
+ This allows charms to redirect inbound connections to their peers
+ or different applications such as load balancers.
+
+ Usage:
+ addresses = [ingress_address(rid=u.rid, unit=u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: string IP address
+ """
+ settings = relation_get(rid=rid, unit=unit)
+ return (settings.get('ingress-address') or
+ settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+ """
+ Retrieve the egress-subnets from a relation.
+
+ This function is to be used on the providing side of the
+ relation, and provides the ranges of addresses that client
+ connections may come from. The result is uninteresting on
+ the consuming side of a relation (unit == local_unit()).
+
+ Returns a stable list of subnets in CIDR format.
+ eg. ['192.168.1.0/24', '2001::F00F/128']
+
+ If egress-subnets is not available, falls back to using the published
+ ingress-address, or finally private-address.
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+ """
+ def _to_range(addr):
+ if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+ addr += '/32'
+ elif ':' in addr and '/' not in addr: # IPv6
+ addr += '/128'
+ return addr
+
+ settings = relation_get(rid=rid, unit=unit)
+ if 'egress-subnets' in settings:
+ return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+ if 'ingress-address' in settings:
+ return [_to_range(settings['ingress-address'])]
+ if 'private-address' in settings:
+ return [_to_range(settings['private-address'])]
+ return [] # Should never happen
+
+
+def unit_doomed(unit=None):
+ """Determines if the unit is being removed from the model
+
+ Requires Juju 2.4.1.
+
+ :param unit: string unit name, defaults to local_unit
+ :side effect: calls goal_state
+ :side effect: calls local_unit
+ :side effect: calls has_juju_version
+ :return: True if the unit is being removed, already gone, or never existed
+ """
+ if not has_juju_version("2.4.1"):
+ # We cannot risk blindly returning False for 'we don't know',
+ # because that could cause data loss; if call sites don't
+ # need an accurate answer, they likely don't need this helper
+ # at all.
+ # goal-state existed in 2.4.0, but did not handle removals
+ # correctly until 2.4.1.
+ raise NotImplementedError("is_doomed")
+ if unit is None:
+ unit = local_unit()
+ gs = goal_state()
+ units = gs.get('units', {})
+ if unit not in units:
+ return True
+ # I don't think 'dead' units ever show up in the goal-state, but
+ # check anyway in addition to 'dying'.
+ return units[unit]['status'] in ('dying', 'dead')
+
+
+def env_proxy_settings(selected_settings=None):
+ """Get proxy settings from process environment variables.
+
+ Get charm proxy settings from environment variables that correspond to
+ juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2, see
+ lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
+ application that reacts to proxy settings passed as environment variables.
+ Some applications support lowercase or uppercase notation (e.g. curl), some
+ support only lowercase (e.g. wget), there are also subjectively rare cases
+ of only uppercase notation support. no_proxy CIDR and wildcard support also
+ varies between runtimes and applications as there is no enforced standard.
+
+ Some applications may connect to multiple destinations and expose config
+ options that would affect only proxy settings for a specific destination
+ these should be handled in charms in an application-specific manner.
+
+ :param selected_settings: format only a subset of possible settings
+ :type selected_settings: list
+ :rtype: Optional[dict[str, str]]
+ """
+ SUPPORTED_SETTINGS = {
+ 'http': 'HTTP_PROXY',
+ 'https': 'HTTPS_PROXY',
+ 'no_proxy': 'NO_PROXY',
+ 'ftp': 'FTP_PROXY'
+ }
+ if selected_settings is None:
+ selected_settings = SUPPORTED_SETTINGS
+
+ selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+ if k in selected_settings]
+ proxy_settings = {}
+ for var in selected_vars:
+ var_val = os.getenv(var)
+ if var_val:
+ proxy_settings[var] = var_val
+ proxy_settings[var.lower()] = var_val
+ # Now handle juju-prefixed environment variables. The legacy vs new
+ # environment variable usage is mutually exclusive
+ charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+ if charm_var_val:
+ proxy_settings[var] = charm_var_val
+ proxy_settings[var.lower()] = charm_var_val
+ if 'no_proxy' in proxy_settings:
+ if _contains_range(proxy_settings['no_proxy']):
+ log(RANGE_WARNING, level=WARNING)
+ return proxy_settings if proxy_settings else None
+
+
+def _contains_range(addresses):
+ """Check for cidr or wildcard domain in a string.
+
+ Given a string comprising a comma separated list of ip addresses
+ and domain names, determine whether the string contains IP ranges
+ or wildcard domains.
+
+ :param addresses: comma separated list of domains and ip addresses.
+ :type addresses: str
+ """
+ return (
+ # Test for cidr (e.g. 10.20.20.0/24)
+ "/" in addresses or
+ # Test for wildcard domains (*.foo.com or .foo.com)
+ "*" in addresses or
+ addresses.startswith(".") or
+ ",." in addresses or
+ " ." in addresses)
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 5656e2f..b33ac90 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -34,21 +34,23 @@ import six
from contextlib import contextmanager
from collections import OrderedDict
-from .hookenv import log, DEBUG
+from .hookenv import log, INFO, DEBUG, local_unit, charm_name
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
__platform__ = get_platform()
if __platform__ == "ubuntu":
- from charmhelpers.core.host_factory.ubuntu import (
+ from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
+ get_distrib_codename,
+ arch
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
- from charmhelpers.core.host_factory.centos import (
+ from charmhelpers.core.host_factory.centos import ( # NOQA:F401
service_available,
add_new_group,
lsb_release,
@@ -58,6 +60,7 @@ elif __platform__ == "centos":
UPDATEDB_PATH = '/etc/updatedb.conf'
+
def service_start(service_name, **kwargs):
"""Start a system service.
@@ -287,8 +290,8 @@ def service_running(service_name, **kwargs):
for key, value in six.iteritems(kwargs):
parameter = '%s=%s' % (key, value)
cmd.append(parameter)
- output = subprocess.check_output(cmd,
- stderr=subprocess.STDOUT).decode('UTF-8')
+ output = subprocess.check_output(
+ cmd, stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
@@ -441,6 +444,51 @@ def add_user_to_group(username, group):
subprocess.check_call(cmd)
+def chage(username, lastday=None, expiredate=None, inactive=None,
+ mindays=None, maxdays=None, root=None, warndays=None):
+ """Change user password expiry information
+
+ :param str username: User to update
+ :param str lastday: Set when password was changed in YYYY-MM-DD format
+ :param str expiredate: Set when user's account will no longer be
+ accessible in YYYY-MM-DD format.
+ -1 will remove an account expiration date.
+ :param str inactive: Set the number of days of inactivity after a password
+ has expired before the account is locked.
+ -1 will remove an account's inactivity.
+ :param str mindays: Set the minimum number of days between password
+ changes to MIN_DAYS.
+ 0 indicates the password can be changed anytime.
+ :param str maxdays: Set the maximum number of days during which a
+ password is valid.
+ -1 as MAX_DAYS will remove checking maxdays
+ :param str root: Apply changes in the CHROOT_DIR directory
+ :param str warndays: Set the number of days of warning before a password
+ change is required
+ :raises subprocess.CalledProcessError: if call to chage fails
+ """
+ cmd = ['chage']
+ if root:
+ cmd.extend(['--root', root])
+ if lastday:
+ cmd.extend(['--lastday', lastday])
+ if expiredate:
+ cmd.extend(['--expiredate', expiredate])
+ if inactive:
+ cmd.extend(['--inactive', inactive])
+ if mindays:
+ cmd.extend(['--mindays', mindays])
+ if maxdays:
+ cmd.extend(['--maxdays', maxdays])
+ if warndays:
+ cmd.extend(['--warndays', warndays])
+ cmd.append(username)
+ subprocess.check_call(cmd)
+
+
+remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
+
+
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
@@ -492,13 +540,15 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
# lets see if we can grab the file and compare the context, to avoid doing
# a write.
existing_content = None
- existing_uid, existing_gid = None, None
+ existing_uid, existing_gid, existing_perms = None, None, None
try:
with open(path, 'rb') as target:
existing_content = target.read()
stat = os.stat(path)
- existing_uid, existing_gid = stat.st_uid, stat.st_gid
- except:
+ existing_uid, existing_gid, existing_perms = (
+ stat.st_uid, stat.st_gid, stat.st_mode
+ )
+ except Exception:
pass
if content != existing_content:
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
@@ -506,10 +556,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
+ if six.PY3 and isinstance(content, six.string_types):
+ content = content.encode('UTF-8')
target.write(content)
return
# the contents were the same, but we might still need to change the
- # ownership.
+ # ownership or permissions.
if existing_uid != uid:
log("Changing uid on already existing content: {} -> {}"
.format(existing_uid, uid), level=DEBUG)
@@ -518,6 +570,10 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
log("Changing gid on already existing content: {} -> {}"
.format(existing_gid, gid), level=DEBUG)
os.chown(path, -1, gid)
+ if existing_perms != perms:
+ log("Changing permissions on existing content: {} -> {}"
+ .format(existing_perms, perms), level=DEBUG)
+ os.chmod(path, perms)
def fstab_remove(mp):
@@ -782,7 +838,7 @@ def list_nics(nic_type=None):
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line.strip() for line in ip_output if line)
- key = re.compile('^[0-9]+:\s+(.+):')
+ key = re.compile(r'^[0-9]+:\s+(.+):')
for line in ip_output:
matched = re.search(key, line)
if matched:
@@ -927,6 +983,20 @@ def is_container():
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+ """Adds the specified path to the mlocate's updatedb.conf PRUNEPATH list.
+
+ This method has no effect if the path specified by updatedb_path does not
+ exist or is not a file.
+
+ @param path: string the path to add to the updatedb.conf PRUNEPATHS value
+ @param updatedb_path: the path to the updatedb.conf file
+ """
+ if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
+ # If the updatedb.conf file doesn't exist then don't attempt to update
+ # the file as the package providing mlocate may not be installed on
+ # the local system
+ return
+
with open(updatedb_path, 'r+') as f_id:
updatedb_text = f_id.read()
output = updatedb(updatedb_text, path)
@@ -946,3 +1016,89 @@ def updatedb(updatedb_text, new_path):
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines)
return output
+
+
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
+ """ Modulo distribution
+
+ This helper uses the unit number, a modulo value and a constant wait time
+ to produce a calculated wait time distribution. This is useful in large
+ scale deployments to distribute load during an expensive operation such as
+ service restarts.
+
+ If you have 1000 nodes that need to restart 100 at a time 1 minute at a
+ time:
+
+ time.wait(modulo_distribution(modulo=100, wait=60))
+ restart()
+
+ If you need restarts to happen serially set modulo to the exact number of
+ nodes and set a high constant wait time:
+
+ time.wait(modulo_distribution(modulo=10, wait=120))
+ restart()
+
+ @param modulo: int The modulo number creates the group distribution
+ @param wait: int The constant time wait value
+ @param non_zero_wait: boolean Override unit % modulo == 0,
+ return modulo * wait. Used to avoid collisions with
+ leader nodes which are often given priority.
+ @return: int Calculated time to wait for unit operation
+ """
+ unit_number = int(local_unit().split('/')[1])
+ calculated_wait_time = (unit_number % modulo) * wait
+ if non_zero_wait and calculated_wait_time == 0:
+ return modulo * wait
+ else:
+ return calculated_wait_time
+
+
+def install_ca_cert(ca_cert, name=None):
+ """
+ Install the given cert as a trusted CA.
+
+ The ``name`` is the stem of the filename where the cert is written, and if
+ not provided, it will default to ``juju-{charm_name}``.
+
+ If the cert is empty or None, or is unchanged, nothing is done.
+ """
+ if not ca_cert:
+ return
+ if not isinstance(ca_cert, bytes):
+ ca_cert = ca_cert.encode('utf8')
+ if not name:
+ name = 'juju-{}'.format(charm_name())
+ cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
+ new_hash = hashlib.md5(ca_cert).hexdigest()
+ if file_hash(cert_file) == new_hash:
+ return
+ log("Installing new CA cert at: {}".format(cert_file), level=INFO)
+ write_file(cert_file, ca_cert)
+ subprocess.check_call(['update-ca-certificates', '--fresh'])
+
+
+def get_system_env(key, default=None):
+ """Get data from system environment as represented in ``/etc/environment``.
+
+ :param key: Key to look up
+ :type key: str
+ :param default: Value to return if key is not found
+ :type default: any
+ :returns: Value for key if found or contents of default parameter
+ :rtype: any
+ :raises: subprocess.CalledProcessError
+ """
+ env_file = '/etc/environment'
+ # use the shell and env(1) to parse the global environments file. This is
+ # done to get the correct result even if the user has shell variable
+ # substitutions or other shell logic in that file.
+ output = subprocess.check_output(
+ ['env', '-i', '/bin/bash', '-c',
+ 'set -a && source {} && env'.format(env_file)],
+ universal_newlines=True)
+ for k, v in (line.split('=', 1)
+ for line in output.splitlines() if '=' in line):
+ if k == key:
+ return v
+ else:
+ return default
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
index d8dc378..3edc068 100644
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,6 @@
import subprocess
+from charmhelpers.core.hookenv import cached
from charmhelpers.core.strutils import BasicStringComparator
@@ -20,6 +21,11 @@ UBUNTU_RELEASES = (
'yakkety',
'zesty',
'artful',
+ 'bionic',
+ 'cosmic',
+ 'disco',
+ 'eoan',
+ 'focal'
)
@@ -70,6 +76,14 @@ def lsb_release():
return d
+def get_distrib_codename():
+ """Return the codename of the distribution
+ :returns: The codename
+ :rtype: str
+ """
+ return lsb_release()['DISTRIB_CODENAME'].lower()
+
+
def cmp_pkgrevno(package, revno, pkgcache=None):
"""Compare supplied revno with the revno of the installed package.
@@ -81,9 +95,22 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
you call this function, or pass an apt_pkg.Cache() instance.
"""
- import apt_pkg
+ from charmhelpers.fetch import apt_pkg
if not pkgcache:
from charmhelpers.fetch import apt_cache
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@cached
+def arch():
+ """Return the package architecture as a string.
+
+ :returns: the architecture
+ :rtype: str
+ :raises: subprocess.CalledProcessError if dpkg command fails
+ """
+ return subprocess.check_output(
+ ['dpkg', '--print-architecture']
+ ).rstrip().decode('UTF-8')
diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py
index 2d40452..e01f4f8 100644
--- a/hooks/charmhelpers/core/kernel.py
+++ b/hooks/charmhelpers/core/kernel.py
@@ -26,12 +26,12 @@ from charmhelpers.core.hookenv import (
__platform__ = get_platform()
if __platform__ == "ubuntu":
- from charmhelpers.core.kernel_factory.ubuntu import (
+ from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401
persistent_modprobe,
update_initramfs,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
- from charmhelpers.core.kernel_factory.centos import (
+ from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401
persistent_modprobe,
update_initramfs,
) # flake8: noqa -- ignore F401 for this import
diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py
index ca9dc99..179ad4f 100644
--- a/hooks/charmhelpers/core/services/base.py
+++ b/hooks/charmhelpers/core/services/base.py
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
"""
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
+ # turn this generator into a list,
+ # as we'll be going over it multiple times
+ new_ports = list(service.get('ports', []))
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
if os.path.exists(port_file):
with open(port_file) as fp:
old_ports = fp.read().split(',')
for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
+ if bool(old_port) and not self.ports_contains(old_port, new_ports):
+ hookenv.close_port(old_port)
with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports))
for port in new_ports:
+ # A port is either a number or 'ICMP'
+ protocol = 'TCP'
+ if str(port).upper() == 'ICMP':
+ protocol = 'ICMP'
if event_name == 'start':
- hookenv.open_port(port)
+ hookenv.open_port(port, protocol)
elif event_name == 'stop':
- hookenv.close_port(port)
+ hookenv.close_port(port, protocol)
+
+ def ports_contains(self, port, ports):
+ if not bool(port):
+ return False
+ if str(port).upper() != 'ICMP':
+ port = int(port)
+ return port in ports
def service_stop(service_name):
diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py
index 685dabd..e8df045 100644
--- a/hooks/charmhelpers/core/strutils.py
+++ b/hooks/charmhelpers/core/strutils.py
@@ -61,13 +61,19 @@ def bytes_from_string(value):
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+ msg = "Unable to interpret non-string value '%s' as bytes" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ if matches:
+ size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ else:
+ # Assume that value passed in is bytes
+ try:
+ size = int(value)
+ except ValueError:
+ msg = "Unable to interpret string value '%s' as bytes" % (value)
+ raise ValueError(msg)
+ return size
class BasicStringComparator(object):
diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py
index 6e413e3..386428d 100644
--- a/hooks/charmhelpers/core/sysctl.py
+++ b/hooks/charmhelpers/core/sysctl.py
@@ -17,38 +17,59 @@
import yaml
-from subprocess import check_call
+from subprocess import check_call, CalledProcessError
from charmhelpers.core.hookenv import (
log,
DEBUG,
ERROR,
+ WARNING,
)
+from charmhelpers.core.host import is_container
+
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@xxxxxxxxxxxxx>'
-def create(sysctl_dict, sysctl_file):
+def create(sysctl_dict, sysctl_file, ignore=False):
"""Creates a sysctl.conf file from a YAML associative array
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+ :param sysctl_dict: a dict or YAML-formatted string of sysctl
+ options eg "{ 'kernel.max_pid': 1337 }"
:type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
+ :param ignore: If True, ignore "unknown variable" errors.
+ :type ignore: bool
:returns: None
"""
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
+ if type(sysctl_dict) is not dict:
+ try:
+ sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+ except yaml.YAMLError:
+ log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+ level=ERROR)
+ return
+ else:
+ sysctl_dict_parsed = sysctl_dict
with open(sysctl_file, "w") as fd:
for key, value in sysctl_dict_parsed.items():
fd.write("{}={}\n".format(key, value))
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+ log("Updating sysctl_file: {} values: {}".format(sysctl_file,
+ sysctl_dict_parsed),
level=DEBUG)
- check_call(["sysctl", "-p", sysctl_file])
+ call = ["sysctl", "-p", sysctl_file]
+ if ignore:
+ call.append("-e")
+
+ try:
+ check_call(call)
+ except CalledProcessError as e:
+ if is_container():
+ log("Error setting some sysctl keys in this container: {}".format(e.output),
+ level=WARNING)
+ else:
+ raise e
diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py
index 7b801a3..9014015 100644
--- a/hooks/charmhelpers/core/templating.py
+++ b/hooks/charmhelpers/core/templating.py
@@ -20,7 +20,8 @@ from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+ perms=0o444, templates_dir=None, encoding='UTF-8',
+ template_loader=None, config_template=None):
"""
Render a template.
@@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root',
The context should be a dict containing the values to be replaced in the
template.
+ config_template may be provided to render from a provided template instead
+ of loading from a file.
+
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
@@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root',
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
template_env = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
+
+ # load from a string if provided explicitly
+ if config_template is not None:
+ template = template_env.from_string(config_template)
+ else:
+ try:
+ source = source
+ template = template_env.get_template(source)
+ except exceptions.TemplateNotFound as e:
+ hookenv.log('Could not load template %s from %s.' %
+ (source, templates_dir),
+ level=hookenv.ERROR)
+ raise e
content = template.render(context)
if target is not None:
target_dir = os.path.dirname(target)
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
index 54ec969..ab55432 100644
--- a/hooks/charmhelpers/core/unitdata.py
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -166,6 +166,10 @@ class Storage(object):
To support dicts, lists, integer, floats, and booleans values
are automatically json encoded/decoded.
+
+ Note: to facilitate unit testing, ':memory:' can be passed as the
+ path parameter which causes sqlite3 to only build the db in memory.
+ This should only be used for testing purposes.
"""
def __init__(self, path=None):
self.db_path = path
@@ -175,6 +179,9 @@ class Storage(object):
else:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+ if self.db_path != ':memory:':
+ with open(self.db_path, 'a') as f:
+ os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
@@ -358,7 +365,7 @@ class Storage(object):
try:
yield self.revision
self.revision = None
- except:
+ except Exception:
self.flush(False)
self.revision = None
raise
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
index 480a627..0cc7fc8 100644
--- a/hooks/charmhelpers/fetch/__init__.py
+++ b/hooks/charmhelpers/fetch/__init__.py
@@ -84,6 +84,7 @@ module = "charmhelpers.fetch.%s" % __platform__
fetch = importlib.import_module(module)
filter_installed_packages = fetch.filter_installed_packages
+filter_missing_packages = fetch.filter_missing_packages
install = fetch.apt_install
upgrade = fetch.apt_upgrade
update = _fetch_update = fetch.apt_update
@@ -96,11 +97,14 @@ if __platform__ == "ubuntu":
apt_update = fetch.apt_update
apt_upgrade = fetch.apt_upgrade
apt_purge = fetch.apt_purge
+ apt_autoremove = fetch.apt_autoremove
apt_mark = fetch.apt_mark
apt_hold = fetch.apt_hold
apt_unhold = fetch.apt_unhold
import_key = fetch.import_key
get_upstream_version = fetch.get_upstream_version
+ apt_pkg = fetch.ubuntu_apt_pkg
+ get_apt_dpkg_env = fetch.get_apt_dpkg_env
elif __platform__ == "centos":
yum_search = fetch.yum_search
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py
index dd24f9e..d25587a 100644
--- a/hooks/charmhelpers/fetch/archiveurl.py
+++ b/hooks/charmhelpers/fetch/archiveurl.py
@@ -89,7 +89,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
:param str source: URL pointing to an archive file.
:param str dest: Local path location to download archive file to.
"""
- # propogate all exceptions
+ # propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse(source)
if proto in ('http', 'https'):
diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py
index 07cd029..c4ab3ff 100644
--- a/hooks/charmhelpers/fetch/bzrurl.py
+++ b/hooks/charmhelpers/fetch/bzrurl.py
@@ -13,7 +13,7 @@
# limitations under the License.
import os
-from subprocess import check_call
+from subprocess import STDOUT, check_output
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
@@ -55,7 +55,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
cmd = ['bzr', 'branch']
cmd += cmd_opts
cmd += [source, dest]
- check_call(cmd)
+ check_output(cmd, stderr=STDOUT)
def install(self, source, dest=None, revno=None):
url_parts = self.parse_url(source)
diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py
index 4cf21bc..070ca9b 100644
--- a/hooks/charmhelpers/fetch/giturl.py
+++ b/hooks/charmhelpers/fetch/giturl.py
@@ -13,7 +13,7 @@
# limitations under the License.
import os
-from subprocess import check_call, CalledProcessError
+from subprocess import check_output, CalledProcessError, STDOUT
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
@@ -50,7 +50,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
cmd = ['git', 'clone', source, dest, '--branch', branch]
if depth:
cmd.extend(['--depth', depth])
- check_call(cmd)
+ check_output(cmd, stderr=STDOUT)
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)
diff --git a/hooks/charmhelpers/fetch/python/__init__.py b/hooks/charmhelpers/fetch/python/__init__.py
new file mode 100644
index 0000000..bff99dc
--- /dev/null
+++ b/hooks/charmhelpers/fetch/python/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hooks/charmhelpers/fetch/python/debug.py b/hooks/charmhelpers/fetch/python/debug.py
new file mode 100644
index 0000000..757135e
--- /dev/null
+++ b/hooks/charmhelpers/fetch/python/debug.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import atexit
+import sys
+
+from charmhelpers.fetch.python.rpdb import Rpdb
+from charmhelpers.core.hookenv import (
+ open_port,
+ close_port,
+ ERROR,
+ log
+)
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@xxxxxxxxxxxxx>"
+
+DEFAULT_ADDR = "0.0.0.0"
+DEFAULT_PORT = 4444
+
+
+def _error(message):
+ log(message, level=ERROR)
+
+
+def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
+ """
+ Set a trace point using the remote debugger
+ """
+ atexit.register(close_port, port)
+ try:
+ log("Starting a remote python debugger session on %s:%s" % (addr,
+ port))
+ open_port(port)
+ debugger = Rpdb(addr=addr, port=port)
+ debugger.set_trace(sys._getframe().f_back)
+ except Exception:
+ _error("Cannot start a remote debug session on %s:%s" % (addr,
+ port))
diff --git a/hooks/charmhelpers/fetch/python/packages.py b/hooks/charmhelpers/fetch/python/packages.py
new file mode 100644
index 0000000..6e95028
--- /dev/null
+++ b/hooks/charmhelpers/fetch/python/packages.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import six
+import subprocess
+import sys
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@xxxxxxxxxxxxx>"
+
+
+def pip_execute(*args, **kwargs):
+ """Overriden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to cause wheels
+    from /usr/share/python-wheels (installed by various tools) to be added.
+ This function ensures that sys.path remains the same after the call is
+ executed.
+ """
+ try:
+ _path = sys.path
+ try:
+ from pip import main as _pip_execute
+ except ImportError:
+ apt_update()
+ if six.PY2:
+ apt_install('python-pip')
+ else:
+ apt_install('python3-pip')
+ from pip import main as _pip_execute
+ _pip_execute(*args, **kwargs)
+ finally:
+ sys.path = _path
+
+
+def parse_options(given, available):
+ """Given a set of options, check if available"""
+ for key, value in sorted(given.items()):
+ if not value:
+ continue
+ if key in available:
+ yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, constraints=None, **options):
+ """Install a requirements file.
+
+ :param constraints: Path to pip constraints file.
+ http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+ """
+ command = ["install"]
+
+ available_options = ('proxy', 'src', 'log', )
+ for option in parse_options(options, available_options):
+ command.append(option)
+
+ command.append("-r {0}".format(requirements))
+ if constraints:
+ command.append("-c {0}".format(constraints))
+ log("Installing from file: {} with constraints {} "
+ "and options: {}".format(requirements, constraints, command))
+ else:
+ log("Installing from file: {} with options: {}".format(requirements,
+ command))
+ pip_execute(command)
+
+
+def pip_install(package, fatal=False, upgrade=False, venv=None,
+ constraints=None, **options):
+ """Install a python package"""
+ if venv:
+ venv_python = os.path.join(venv, 'bin/pip')
+ command = [venv_python, "install"]
+ else:
+ command = ["install"]
+
+ available_options = ('proxy', 'src', 'log', 'index-url', )
+ for option in parse_options(options, available_options):
+ command.append(option)
+
+ if upgrade:
+ command.append('--upgrade')
+
+ if constraints:
+ command.extend(['-c', constraints])
+
+ if isinstance(package, list):
+ command.extend(package)
+ else:
+ command.append(package)
+
+ log("Installing {} package with options: {}".format(package,
+ command))
+ if venv:
+ subprocess.check_call(command)
+ else:
+ pip_execute(command)
+
+
+def pip_uninstall(package, **options):
+ """Uninstall a python package"""
+ command = ["uninstall", "-q", "-y"]
+
+ available_options = ('proxy', 'log', )
+ for option in parse_options(options, available_options):
+ command.append(option)
+
+ if isinstance(package, list):
+ command.extend(package)
+ else:
+ command.append(package)
+
+ log("Uninstalling {} package with options: {}".format(package,
+ command))
+ pip_execute(command)
+
+
+def pip_list():
+ """Returns the list of current python installed packages
+ """
+ return pip_execute(["list"])
+
+
+def pip_create_virtualenv(path=None):
+ """Create an isolated Python environment."""
+ if six.PY2:
+ apt_install('python-virtualenv')
+ else:
+ apt_install('python3-virtualenv')
+
+ if path:
+ venv_path = path
+ else:
+ venv_path = os.path.join(charm_dir(), 'venv')
+
+ if not os.path.exists(venv_path):
+ subprocess.check_call(['virtualenv', venv_path])
diff --git a/hooks/charmhelpers/fetch/python/rpdb.py b/hooks/charmhelpers/fetch/python/rpdb.py
new file mode 100644
index 0000000..9b31610
--- /dev/null
+++ b/hooks/charmhelpers/fetch/python/rpdb.py
@@ -0,0 +1,56 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Remote Python Debugger (pdb wrapper)."""
+
+import pdb
+import socket
+import sys
+
+__author__ = "Bertrand Janin <b@xxxxxxxxx>"
+__version__ = "0.1.3"
+
+
+class Rpdb(pdb.Pdb):
+
+ def __init__(self, addr="127.0.0.1", port=4444):
+ """Initialize the socket and initialize pdb."""
+
+ # Backup stdin and stdout before replacing them by the socket handle
+ self.old_stdout = sys.stdout
+ self.old_stdin = sys.stdin
+
+ # Open a 'reusable' socket to let the webapp reload on the same port
+ self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
+ self.skt.bind((addr, port))
+ self.skt.listen(1)
+ (clientsocket, address) = self.skt.accept()
+ handle = clientsocket.makefile('rw')
+ pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
+ sys.stdout = sys.stdin = handle
+
+ def shutdown(self):
+ """Revert stdin and stdout, close the socket."""
+ sys.stdout = self.old_stdout
+ sys.stdin = self.old_stdin
+ self.skt.close()
+ self.set_continue()
+
+ def do_continue(self, arg):
+ """Stop all operation on ``continue``."""
+ self.shutdown()
+ return 1
+
+ do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
diff --git a/hooks/charmhelpers/fetch/python/version.py b/hooks/charmhelpers/fetch/python/version.py
new file mode 100644
index 0000000..3eb4210
--- /dev/null
+++ b/hooks/charmhelpers/fetch/python/version.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@xxxxxxxxxxxxx>"
+
+
+def current_version():
+ """Current system python version"""
+ return sys.version_info
+
+
+def current_version_string():
+ """Current system python version as string major.minor.micro"""
+ return "{0}.{1}.{2}".format(sys.version_info.major,
+ sys.version_info.minor,
+ sys.version_info.micro)
diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py
index 112a54c..fc70aa9 100644
--- a/hooks/charmhelpers/fetch/snap.py
+++ b/hooks/charmhelpers/fetch/snap.py
@@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception):
pass
+class InvalidSnapChannel(Exception):
+ pass
+
+
def _snap_exec(commands):
"""
Execute snap commands.
@@ -65,7 +69,7 @@ def _snap_exec(commands):
.format(SNAP_NO_LOCK_RETRY_COUNT))
return_code = e.returncode
log('Snap failed to acquire lock, trying again in {} seconds.'
- .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN'))
+ .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
sleep(SNAP_NO_LOCK_RETRY_DELAY)
return return_code
@@ -132,3 +136,15 @@ def snap_refresh(packages, *flags):
log(message, level='INFO')
return _snap_exec(['refresh'] + flags + packages)
+
+
+def valid_snap_channel(channel):
+ """ Validate snap channel exists
+
+ :raises InvalidSnapChannel: When channel does not exist
+ :return: Boolean
+ """
+ if channel.lower() in SNAP_CHANNELS:
+ return True
+ else:
+ raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
index 40e1cb5..3ddaf0d 100644
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -13,23 +13,23 @@
# limitations under the License.
from collections import OrderedDict
-import os
import platform
import re
import six
-import time
import subprocess
-from tempfile import NamedTemporaryFile
+import sys
+import time
+
+from charmhelpers.core.host import get_distrib_codename, get_system_env
-from charmhelpers.core.host import (
- lsb_release
-)
from charmhelpers.core.hookenv import (
log,
DEBUG,
WARNING,
+ env_proxy_settings,
)
from charmhelpers.fetch import SourceConfigError, GPGKeyError
+from charmhelpers.fetch import ubuntu_apt_pkg
PROPOSED_POCKET = (
"# Proposed\n"
@@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = {
'x86_64': PROPOSED_POCKET,
'ppc64le': PROPOSED_PORTS_POCKET,
'aarch64': PROPOSED_PORTS_POCKET,
+ 's390x': PROPOSED_PORTS_POCKET,
}
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@@ -157,6 +158,38 @@ CLOUD_ARCHIVE_POCKETS = {
'queens/proposed': 'xenial-proposed/queens',
'xenial-queens/proposed': 'xenial-proposed/queens',
'xenial-proposed/queens': 'xenial-proposed/queens',
+ # Rocky
+ 'rocky': 'bionic-updates/rocky',
+ 'bionic-rocky': 'bionic-updates/rocky',
+ 'bionic-rocky/updates': 'bionic-updates/rocky',
+ 'bionic-updates/rocky': 'bionic-updates/rocky',
+ 'rocky/proposed': 'bionic-proposed/rocky',
+ 'bionic-rocky/proposed': 'bionic-proposed/rocky',
+ 'bionic-proposed/rocky': 'bionic-proposed/rocky',
+ # Stein
+ 'stein': 'bionic-updates/stein',
+ 'bionic-stein': 'bionic-updates/stein',
+ 'bionic-stein/updates': 'bionic-updates/stein',
+ 'bionic-updates/stein': 'bionic-updates/stein',
+ 'stein/proposed': 'bionic-proposed/stein',
+ 'bionic-stein/proposed': 'bionic-proposed/stein',
+ 'bionic-proposed/stein': 'bionic-proposed/stein',
+ # Train
+ 'train': 'bionic-updates/train',
+ 'bionic-train': 'bionic-updates/train',
+ 'bionic-train/updates': 'bionic-updates/train',
+ 'bionic-updates/train': 'bionic-updates/train',
+ 'train/proposed': 'bionic-proposed/train',
+ 'bionic-train/proposed': 'bionic-proposed/train',
+ 'bionic-proposed/train': 'bionic-proposed/train',
+ # Ussuri
+ 'ussuri': 'bionic-updates/ussuri',
+ 'bionic-ussuri': 'bionic-updates/ussuri',
+ 'bionic-ussuri/updates': 'bionic-updates/ussuri',
+ 'bionic-updates/ussuri': 'bionic-updates/ussuri',
+ 'ussuri/proposed': 'bionic-proposed/ussuri',
+ 'bionic-ussuri/proposed': 'bionic-proposed/ussuri',
+ 'bionic-proposed/ussuri': 'bionic-proposed/ussuri',
}
@@ -180,18 +213,54 @@ def filter_installed_packages(packages):
return _pkgs
-def apt_cache(in_memory=True, progress=None):
- """Build and return an apt cache."""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache(progress)
+def filter_missing_packages(packages):
+ """Return a list of packages that are installed.
+
+ :param packages: list of packages to evaluate.
+ :returns list: Packages that are installed.
+ """
+ return list(
+ set(packages) -
+ set(filter_installed_packages(packages))
+ )
+
+
+def apt_cache(*_, **__):
+ """Shim returning an object simulating the apt_pkg Cache.
+
+    :param _: Accept arguments for compatibility, not used.
+ :type _: any
+    :param __: Accept keyword arguments for compatibility, not used.
+ :type __: any
+    :returns: Object used to interrogate the system apt and dpkg databases.
+    :rtype: ubuntu_apt_pkg.Cache
+ """
+ if 'apt_pkg' in sys.modules:
+ # NOTE(fnordahl): When our consumer use the upstream ``apt_pkg`` module
+ # in conjunction with the apt_cache helper function, they may expect us
+ # to call ``apt_pkg.init()`` for them.
+ #
+ # Detect this situation, log a warning and make the call to
+ # ``apt_pkg.init()`` to avoid the consumer Python interpreter from
+ # crashing with a segmentation fault.
+ log('Support for use of upstream ``apt_pkg`` module in conjunction'
+ 'with charm-helpers is deprecated since 2019-06-25', level=WARNING)
+ sys.modules['apt_pkg'].init()
+ return ubuntu_apt_pkg.Cache()
def apt_install(packages, options=None, fatal=False):
- """Install one or more packages."""
+ """Install one or more packages.
+
+ :param packages: Package(s) to install
+ :type packages: Option[str, List[str]]
+ :param options: Options to pass on to apt-get
+ :type options: Option[None, List[str]]
+ :param fatal: Whether the command's output should be checked and
+ retried.
+ :type fatal: bool
+ :raises: subprocess.CalledProcessError
+ """
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
@@ -208,7 +277,17 @@ def apt_install(packages, options=None, fatal=False):
def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages."""
+ """Upgrade all packages.
+
+ :param options: Options to pass on to apt-get
+ :type options: Option[None, List[str]]
+ :param fatal: Whether the command's output should be checked and
+ retried.
+ :type fatal: bool
+ :param dist: Whether ``dist-upgrade`` should be used over ``upgrade``
+ :type dist: bool
+ :raises: subprocess.CalledProcessError
+ """
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
@@ -229,7 +308,15 @@ def apt_update(fatal=False):
def apt_purge(packages, fatal=False):
- """Purge one or more packages."""
+ """Purge one or more packages.
+
+    :param packages: Package(s) to purge
+ :type packages: Option[str, List[str]]
+ :param fatal: Whether the command's output should be checked and
+ retried.
+ :type fatal: bool
+ :raises: subprocess.CalledProcessError
+ """
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, six.string_types):
cmd.append(packages)
@@ -239,6 +326,21 @@ def apt_purge(packages, fatal=False):
_run_apt_command(cmd, fatal)
+def apt_autoremove(purge=True, fatal=False):
+ """Purge one or more packages.
+ :param purge: Whether the ``--purge`` option should be passed on or not.
+ :type purge: bool
+ :param fatal: Whether the command's output should be checked and
+ retried.
+ :type fatal: bool
+ :raises: subprocess.CalledProcessError
+ """
+ cmd = ['apt-get', '--assume-yes', 'autoremove']
+ if purge:
+ cmd.append('--purge')
+ _run_apt_command(cmd, fatal)
+
+
def apt_mark(packages, mark, fatal=False):
"""Flag one or more packages using apt-mark."""
log("Marking {} as {}".format(packages, mark))
@@ -265,13 +367,18 @@ def apt_unhold(packages, fatal=False):
def import_key(key):
"""Import an ASCII Armor key.
- /!\ A Radix64 format keyid is also supported for backwards
- compatibility, but should never be used; the key retrieval
- mechanism is insecure and subject to man-in-the-middle attacks
- voiding all signature checks using that key.
-
- :param keyid: The key in ASCII armor format,
- including BEGIN and END markers.
+ A Radix64 format keyid is also supported for backwards
+ compatibility. In this case Ubuntu keyserver will be
+ queried for a key via HTTPS by its keyid. This method
+    is less preferable because https proxy servers may
+ require traffic decryption which is equivalent to a
+ man-in-the-middle attack (a proxy server impersonates
+ keyserver TLS certificates and has to be explicitly
+ trusted by the system).
+
+ :param key: A GPG key in ASCII armor format,
+ including BEGIN and END markers or a keyid.
+ :type key: (bytes, str)
:raises: GPGKeyError if the key could not be imported
"""
key = key.strip()
@@ -282,35 +389,131 @@ def import_key(key):
log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
'-----END PGP PUBLIC KEY BLOCK-----' in key):
- log("Importing ASCII Armor PGP key", level=DEBUG)
- with NamedTemporaryFile() as keyfile:
- with open(keyfile.name, 'w') as fd:
- fd.write(key)
- fd.write("\n")
- cmd = ['apt-key', 'add', keyfile.name]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error = "Error importing PGP key '{}'".format(key)
- log(error)
- raise GPGKeyError(error)
+ log("Writing provided PGP key in the binary format", level=DEBUG)
+ if six.PY3:
+ key_bytes = key.encode('utf-8')
+ else:
+ key_bytes = key
+ key_name = _get_keyid_by_gpg_key(key_bytes)
+ key_gpg = _dearmor_gpg_key(key_bytes)
+ _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
else:
raise GPGKeyError("ASCII armor markers missing from GPG key")
else:
- # We should only send things obviously not a keyid offsite
- # via this unsecured protocol, as it may be a secret or part
- # of one.
log("PGP key found (looks like Radix64 format)", level=WARNING)
- log("INSECURLY importing PGP key from keyserver; "
+ log("SECURELY importing PGP key from keyserver; "
"full key not provided.", level=WARNING)
- cmd = ['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error = "Error importing PGP key '{}'".format(key)
- log(error)
- raise GPGKeyError(error)
+ # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
+ # to retrieve GPG keys. `apt-key adv` command is deprecated as is
+ # apt-key in general as noted in its manpage. See lp:1433761 for more
+ # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
+    # the gpg key.
+ key_asc = _get_key_by_keyid(key)
+ # write the key in GPG format so that apt-key list shows it
+ key_gpg = _dearmor_gpg_key(key_asc)
+ _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
+
+
+def _get_keyid_by_gpg_key(key_material):
+ """Get a GPG key fingerprint by GPG key material.
+ Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
+ or binary GPG key material. Can be used, for example, to generate file
+ names for keys passed via charm options.
+
+ :param key_material: ASCII armor-encoded or binary GPG key material
+ :type key_material: bytes
+ :raises: GPGKeyError if invalid key material has been provided
+ :returns: A GPG key fingerprint
+ :rtype: str
+ """
+ # Use the same gpg command for both Xenial and Bionic
+ cmd = 'gpg --with-colons --with-fingerprint'
+ ps = subprocess.Popen(cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ out, err = ps.communicate(input=key_material)
+ if six.PY3:
+ out = out.decode('utf-8')
+ err = err.decode('utf-8')
+ if 'gpg: no valid OpenPGP data found.' in err:
+ raise GPGKeyError('Invalid GPG key material provided')
+ # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
+ return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
+
+
+def _get_key_by_keyid(keyid):
+ """Get a key via HTTPS from the Ubuntu keyserver.
+ Different key ID formats are supported by SKS keyservers (the longer ones
+ are more secure, see "dead beef attack" and https://evil32.com/). Since
+ HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
+ impersonate keyserver.ubuntu.com and generate a certificate with
+ keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
+ certificate. If such proxy behavior is expected it is necessary to add the
+ CA certificate chain containing the intermediate CA of the SSLBump proxy to
+ every machine that this code runs on via ca-certs cloud-init directive (via
+ cloudinit-userdata model-config) or via other means (such as through a
+ custom charm option). Also note that DNS resolution for the hostname in a
+ URL is done at a proxy server - not at the client side.
+
+ 8-digit (32 bit) key ID
+ https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
+ 16-digit (64 bit) key ID
+ https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
+ 40-digit key ID:
+ https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
+
+ :param keyid: An 8, 16 or 40 hex digit keyid to find a key for
+ :type keyid: (bytes, str)
+ :returns: A key material for the specified GPG key id
+ :rtype: (str, bytes)
+ :raises: subprocess.CalledProcessError
+ """
+ # options=mr - machine-readable output (disables html wrappers)
+ keyserver_url = ('https://keyserver.ubuntu.com'
+ '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
+ curl_cmd = ['curl', keyserver_url.format(keyid)]
+ # use proxy server settings in order to retrieve the key
+ return subprocess.check_output(curl_cmd,
+ env=env_proxy_settings(['https']))
+
+
+def _dearmor_gpg_key(key_asc):
+ """Converts a GPG key in the ASCII armor format to the binary format.
+
+ :param key_asc: A GPG key in ASCII armor format.
+ :type key_asc: (str, bytes)
+ :returns: A GPG key in binary format
+ :rtype: (str, bytes)
+ :raises: GPGKeyError
+ """
+ ps = subprocess.Popen(['gpg', '--dearmor'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ out, err = ps.communicate(input=key_asc)
+ # no need to decode output as it is binary (invalid utf-8), only error
+ if six.PY3:
+ err = err.decode('utf-8')
+ if 'gpg: no valid OpenPGP data found.' in err:
+ raise GPGKeyError('Invalid GPG key material. Check your network setup'
+ ' (MTU, routing, DNS) and/or proxy server settings'
+ ' as well as destination keyserver status.')
+ else:
+ return out
+
+
+def _write_apt_gpg_keyfile(key_name, key_material):
+ """Writes GPG key material into a file at a provided path.
+
+ :param key_name: A key name to use for a key file (could be a fingerprint)
+ :type key_name: str
+ :param key_material: A GPG key material (binary)
+ :type key_material: (str, bytes)
+ """
+ with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
+ 'wb') as keyf:
+ keyf.write(key_material)
def add_source(source, key=None, fail_invalid=False):
@@ -385,14 +588,16 @@ def add_source(source, key=None, fail_invalid=False):
for r, fn in six.iteritems(_mapping):
m = re.match(r, source)
if m:
- # call the assoicated function with the captured groups
- # raises SourceConfigError on error.
- fn(*m.groups())
if key:
+ # Import key before adding the source which depends on it,
+ # as refreshing packages could fail otherwise.
try:
import_key(key)
except GPGKeyError as e:
raise SourceConfigError(str(e))
+ # call the associated function with the captured groups
+ # raises SourceConfigError on error.
+ fn(*m.groups())
break
else:
# nothing matched. log an error and maybe sys.exit
@@ -405,13 +610,13 @@ def add_source(source, key=None, fail_invalid=False):
def _add_proposed():
"""Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
- Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for
+ Uses get_distrib_codename to determine the correct stanza for
the deb line.
For intel architectures PROPOSED_POCKET is used for the release, but for
other architectures PROPOSED_PORTS_POCKET is used for the release.
"""
- release = lsb_release()['DISTRIB_CODENAME']
+ release = get_distrib_codename()
arch = platform.machine()
if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
raise SourceConfigError("Arch {} not supported for (distro-)proposed"
@@ -424,8 +629,16 @@ def _add_apt_repository(spec):
"""Add the spec using add_apt_repository
:param spec: the parameter to pass to add_apt_repository
+ :type spec: str
"""
- _run_with_retries(['add-apt-repository', '--yes', spec])
+ if '{series}' in spec:
+ series = get_distrib_codename()
+ spec = spec.replace('{series}', series)
+ # software-properties package for bionic properly reacts to proxy settings
+ # passed as environment variables (See lp:1433761). This is not the case
+ # for LTS and non-LTS releases below bionic.
+ _run_with_retries(['add-apt-repository', '--yes', spec],
+ cmd_env=env_proxy_settings(['https']))
def _add_cloud_pocket(pocket):
@@ -494,7 +707,7 @@ def _verify_is_ubuntu_rel(release, os_release):
:raises: SourceConfigError if the release is not the same as the ubuntu
release.
"""
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+ ubuntu_rel = get_distrib_codename()
if release != ubuntu_rel:
raise SourceConfigError(
'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'
@@ -505,21 +718,22 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_message="", cmd_env=None):
"""Run a command and retry until success or max_retries is reached.
- :param: cmd: str: The apt command to run.
- :param: max_retries: int: The number of retries to attempt on a fatal
- command. Defaults to CMD_RETRY_COUNT.
- :param: retry_exitcodes: tuple: Optional additional exit codes to retry.
- Defaults to retry on exit code 1.
- :param: retry_message: str: Optional log prefix emitted during retries.
- :param: cmd_env: dict: Environment variables to add to the command run.
+ :param cmd: The apt command to run.
+ :type cmd: str
+ :param max_retries: The number of retries to attempt on a fatal
+ command. Defaults to CMD_RETRY_COUNT.
+ :type max_retries: int
+ :param retry_exitcodes: Optional additional exit codes to retry.
+ Defaults to retry on exit code 1.
+ :type retry_exitcodes: tuple
+ :param retry_message: Optional log prefix emitted during retries.
+ :type retry_message: str
+ :param cmd_env: Environment variables to add to the command run.
+ :type cmd_env: Optional[Dict[str, str]]
"""
-
- env = None
- kwargs = {}
+ env = get_apt_dpkg_env()
if cmd_env:
- env = os.environ.copy()
env.update(cmd_env)
- kwargs['env'] = env
if not retry_message:
retry_message = "Failed executing '{}'".format(" ".join(cmd))
@@ -531,8 +745,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_results = (None,) + retry_exitcodes
while result in retry_results:
try:
- # result = subprocess.check_call(cmd, env=env)
- result = subprocess.check_call(cmd, **kwargs)
+ result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > max_retries:
@@ -545,22 +758,18 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
def _run_apt_command(cmd, fatal=False):
"""Run an apt command with optional retries.
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
+ :param cmd: The apt command to run.
+ :type cmd: str
+ :param fatal: Whether the command's output should be checked and
+ retried.
+ :type fatal: bool
"""
- # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
- cmd_env = {
- 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
-
if fatal:
_run_with_retries(
- cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
+ cmd, retry_exitcodes=(1, APT_NO_LOCK,),
retry_message="Couldn't acquire DPKG lock")
else:
- env = os.environ.copy()
- env.update(cmd_env)
- subprocess.call(cmd, env=env)
+ subprocess.call(cmd, env=get_apt_dpkg_env())
def get_upstream_version(package):
@@ -568,11 +777,10 @@ def get_upstream_version(package):
@returns None (if not installed) or the upstream version
"""
- import apt_pkg
cache = apt_cache()
try:
pkg = cache[package]
- except:
+ except Exception:
# the package is unknown to the current apt cache.
return None
@@ -580,4 +788,18 @@ def get_upstream_version(package):
# package is known, but no version is currently installed.
return None
- return apt_pkg.upstream_version(pkg.current_ver.ver_str)
+ return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str)
+
+
+def get_apt_dpkg_env():
+ """Get environment suitable for execution of APT and DPKG tools.
+
+ We keep this in a helper function instead of in a global constant to
+ avoid execution on import of the library.
+ :returns: Environment suitable for execution of APT and DPKG tools.
+ :rtype: Dict[str, str]
+ """
+ # The fallback is used in the event of ``/etc/environment`` not containing
+ # a valid PATH variable.
+ return {'DEBIAN_FRONTEND': 'noninteractive',
+ 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')}
diff --git a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
new file mode 100644
index 0000000..929a75d
--- /dev/null
+++ b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
@@ -0,0 +1,267 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provide a subset of the ``python-apt`` module API.
+
+Data collection is done through subprocess calls to ``apt-cache`` and
+``dpkg-query`` commands.
+
+The main purpose for this module is to avoid dependency on the
+``python-apt`` python module.
+
+The indicated python module is a wrapper around the ``apt`` C++ library
+which is tightly connected to the version of the distribution it was
+shipped on. It is not developed in a backward/forward compatible manner.
+
+This in turn makes it incredibly hard to distribute as a wheel for a piece
+of python software that supports a span of distro releases [0][1].
+
+Upstream feedback like [2] does not give confidence that this will ever
+change, so with this we get rid of the dependency.
+
+0: https://github.com/juju-solutions/layer-basic/pull/135
+1: https://bugs.launchpad.net/charm-octavia/+bug/1824112
+2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10
+"""
+
+import locale
+import os
+import subprocess
+import sys
+
+
+class _container(dict):
+ """Simple container for attributes."""
+ __getattr__ = dict.__getitem__
+ __setattr__ = dict.__setitem__
+
+
+class Package(_container):
+ """Simple container for package attributes."""
+
+
+class Version(_container):
+ """Simple container for version attributes."""
+
+
+class Cache(object):
+ """Simulation of ``apt_pkg`` Cache object."""
+ def __init__(self, progress=None):
+ pass
+
+ def __contains__(self, package):
+ try:
+ pkg = self.__getitem__(package)
+ return pkg is not None
+ except KeyError:
+ return False
+
+ def __getitem__(self, package):
+ """Get information about a package from apt and dpkg databases.
+
+ :param package: Name of package
+ :type package: str
+ :returns: Package object
+ :rtype: object
+ :raises: KeyError, subprocess.CalledProcessError
+ """
+ apt_result = self._apt_cache_show([package])[package]
+ apt_result['name'] = apt_result.pop('package')
+ pkg = Package(apt_result)
+ dpkg_result = self._dpkg_list([package]).get(package, {})
+ current_ver = None
+ installed_version = dpkg_result.get('version')
+ if installed_version:
+ current_ver = Version({'ver_str': installed_version})
+ pkg.current_ver = current_ver
+ pkg.architecture = dpkg_result.get('architecture')
+ return pkg
+
+ def _dpkg_list(self, packages):
+ """Get data from system dpkg database for package.
+
+ :param packages: Packages to get data from
+ :type packages: List[str]
+ :returns: Structured data about installed packages, keys like
+ ``dpkg-query --list``
+ :rtype: dict
+ :raises: subprocess.CalledProcessError
+ """
+ pkgs = {}
+ cmd = ['dpkg-query', '--list']
+ cmd.extend(packages)
+ if locale.getlocale() == (None, None):
+ # subprocess calls out to locale.getpreferredencoding(False) to
+ # determine encoding. Workaround for Trusty where the
+ # environment appears to not be set up correctly.
+ locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+ try:
+ output = subprocess.check_output(cmd,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ except subprocess.CalledProcessError as cp:
+ # ``dpkg-query`` may return error and at the same time have
+ # produced useful output, for example when asked for multiple
+ # packages where some are not installed
+ if cp.returncode != 1:
+ raise
+ output = cp.output
+ headings = []
+ for line in output.splitlines():
+ if line.startswith('||/'):
+ headings = line.split()
+ headings.pop(0)
+ continue
+ elif (line.startswith('|') or line.startswith('+') or
+ line.startswith('dpkg-query:')):
+ continue
+ else:
+ data = line.split(None, 4)
+ status = data.pop(0)
+ if status != 'ii':
+ continue
+ pkg = {}
+ pkg.update({k.lower(): v for k, v in zip(headings, data)})
+ if 'name' in pkg:
+ pkgs.update({pkg['name']: pkg})
+ return pkgs
+
+ def _apt_cache_show(self, packages):
+ """Get data from system apt cache for package.
+
+ :param packages: Packages to get data from
+ :type packages: List[str]
+ :returns: Structured data about package, keys like
+ ``apt-cache show``
+ :rtype: dict
+ :raises: subprocess.CalledProcessError
+ """
+ pkgs = {}
+ cmd = ['apt-cache', 'show', '--no-all-versions']
+ cmd.extend(packages)
+ if locale.getlocale() == (None, None):
+ # subprocess calls out to locale.getpreferredencoding(False) to
+ # determine encoding. Workaround for Trusty where the
+ # environment appears to not be set up correctly.
+ locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+ try:
+ output = subprocess.check_output(cmd,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ previous = None
+ pkg = {}
+ for line in output.splitlines():
+ if not line:
+ if 'package' in pkg:
+ pkgs.update({pkg['package']: pkg})
+ pkg = {}
+ continue
+ if line.startswith(' '):
+ if previous and previous in pkg:
+ pkg[previous] += os.linesep + line.lstrip()
+ continue
+ if ':' in line:
+ kv = line.split(':', 1)
+ key = kv[0].lower()
+ if key == 'n':
+ continue
+ previous = key
+ pkg.update({key: kv[1].lstrip()})
+ except subprocess.CalledProcessError as cp:
+ # ``apt-cache`` returns 100 if none of the packages asked for
+ # exist in the apt cache.
+ if cp.returncode != 100:
+ raise
+ return pkgs
+
+
+class Config(_container):
+ def __init__(self):
+ super(Config, self).__init__(self._populate())
+
+ def _populate(self):
+ cfgs = {}
+ cmd = ['apt-config', 'dump']
+ output = subprocess.check_output(cmd,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ for line in output.splitlines():
+ if not line.startswith("CommandLine"):
+ k, v = line.split(" ", 1)
+ cfgs[k] = v.strip(";").strip("\"")
+
+ return cfgs
+
+
+# Backwards compatibility with old apt_pkg module
+sys.modules[__name__].config = Config()
+
+
+def init():
+ """Compatibility shim that does nothing."""
+ pass
+
+
+def upstream_version(version):
+ """Extracts upstream version from a version string.
+
+ Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/
+ apt-pkg/deb/debversion.cc#L259
+
+ :param version: Version string
+ :type version: str
+ :returns: Upstream version
+ :rtype: str
+ """
+ if version:
+ version = version.split(':')[-1]
+ version = version.split('-')[0]
+ return version
+
+
+def version_compare(a, b):
+ """Compare the given versions.
+
+ Call out to ``dpkg`` to make sure the code doing the comparison is
+ compatible with what the ``apt`` library would do. Mimic the return
+ values.
+
+ Upstream reference:
+ https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html
+ ?highlight=version_compare#apt_pkg.version_compare
+
+ :param a: version string
+ :type a: str
+ :param b: version string
+ :type b: str
+ :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b,
+ <0 if ``a`` is smaller than ``b``
+ :rtype: int
+ :raises: subprocess.CalledProcessError, RuntimeError
+ """
+ for op in ('gt', 1), ('eq', 0), ('lt', -1):
+ try:
+ subprocess.check_call(['dpkg', '--compare-versions',
+ a, op[0], b],
+ stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ return op[1]
+ except subprocess.CalledProcessError as cp:
+ if cp.returncode == 1:
+ continue
+ raise
+ else:
+ raise RuntimeError('Unable to compare "{}" and "{}", according to '
+ 'our logic they are neither greater, equal nor '
+ 'less than each other.'.format(a, b))
diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py
index d9a4d5c..78c81af 100644
--- a/hooks/charmhelpers/osplatform.py
+++ b/hooks/charmhelpers/osplatform.py
@@ -1,4 +1,5 @@
import platform
+import os
def get_platform():
@@ -9,9 +10,13 @@ def get_platform():
This string is used to decide which platform module should be imported.
"""
# linux_distribution is deprecated and will be removed in Python 3.7
- # Warings *not* disabled, as we certainly need to fix this.
- tuple_platform = platform.linux_distribution()
- current_platform = tuple_platform[0]
+ # Warnings *not* disabled, as we certainly need to fix this.
+ if hasattr(platform, 'linux_distribution'):
+ tuple_platform = platform.linux_distribution()
+ current_platform = tuple_platform[0]
+ else:
+ current_platform = _get_platform_from_fs()
+
if "Ubuntu" in current_platform:
return "ubuntu"
elif "CentOS" in current_platform:
@@ -20,6 +25,22 @@ def get_platform():
# Stock Python does not detect Ubuntu and instead returns debian.
# Or at least it does in some build environments like Travis CI
return "ubuntu"
+ elif "elementary" in current_platform:
+ # ElementaryOS fails to run tests locally without this.
+ return "ubuntu"
else:
raise RuntimeError("This module is not supported on {}."
.format(current_platform))
+
+
+def _get_platform_from_fs():
+ """Get Platform from /etc/os-release."""
+ with open(os.path.join(os.sep, 'etc', 'os-release')) as fin:
+ content = dict(
+ line.split('=', 1)
+ for line in fin.read().splitlines()
+ if '=' in line
+ )
+ for k, v in content.items():
+ content[k] = v.strip('"')
+ return content["NAME"]
diff --git a/hooks/common.py b/hooks/common.py
index 66d41ec..c2280a3 100644
--- a/hooks/common.py
+++ b/hooks/common.py
@@ -43,6 +43,12 @@ def check_ip(n):
return False
+def ingress_address(relation_data):
+ if 'ingress-address' in relation_data:
+ return relation_data['ingress-address']
+ return relation_data['private-address']
+
+
def get_local_ingress_address(binding='website'):
# using network-get to retrieve the address details if available.
log('Getting hostname for binding %s' % binding)
@@ -342,21 +348,6 @@ def apply_host_policy(target_id, owner_unit, owner_relation):
ssh_service.save()
-def get_valid_relations():
- for x in subprocess.Popen(['relation-ids', 'monitors'],
- stdout=subprocess.PIPE).stdout:
- yield x.strip()
- for x in subprocess.Popen(['relation-ids', 'nagios'],
- stdout=subprocess.PIPE).stdout:
- yield x.strip()
-
-
-def get_valid_units(relation_id):
- for x in subprocess.Popen(['relation-list', '-r', relation_id],
- stdout=subprocess.PIPE).stdout:
- yield x.strip()
-
-
def _replace_in_config(find_me, replacement):
with open(INPROGRESS_CFG) as cf:
with tempfile.NamedTemporaryFile(dir=INPROGRESS_DIR, delete=False) as new_cf:
diff --git a/hooks/install b/hooks/install
index f002e46..a8900a3 100755
--- a/hooks/install
+++ b/hooks/install
@@ -29,7 +29,7 @@ echo nagios3-cgi nagios3/adminpassword password $PASSWORD | debconf-set-selectio
echo nagios3-cgi nagios3/adminpassword-repeat password $PASSWORD | debconf-set-selections
DEBIAN_FRONTEND=noninteractive apt-get -qy \
- install nagios3 nagios-plugins python-cheetah python-jinja2 dnsutils debconf-utils nagios-nrpe-plugin pynag python-apt python-yaml
+ install nagios3 nagios-plugins python-cheetah python-jinja2 dnsutils debconf-utils nagios-nrpe-plugin pynag python-apt python-yaml python-enum34
scripts/postfix_loopback_only.sh
diff --git a/hooks/monitors-relation-changed b/hooks/monitors-relation-changed
index 13cb96c..e16589d 100755
--- a/hooks/monitors-relation-changed
+++ b/hooks/monitors-relation-changed
@@ -18,17 +18,77 @@
import sys
import os
-import subprocess
import yaml
-import json
import re
-
-
-from common import (customize_service, get_pynag_host,
- get_pynag_service, refresh_hostgroups,
- get_valid_relations, get_valid_units,
- initialize_inprogress_config, flush_inprogress_config,
- get_local_ingress_address)
+from collections import defaultdict
+
+from charmhelpers.core.hookenv import (
+ relation_get,
+ ingress_address,
+ related_units,
+ relation_ids,
+ log,
+ DEBUG
+)
+
+from common import (
+ customize_service,
+ get_pynag_host,
+ get_pynag_service,
+ refresh_hostgroups,
+ initialize_inprogress_config,
+ flush_inprogress_config
+)
+
+
+REQUIRED_REL_DATA_KEYS = [
+ 'target-address',
+ 'monitors',
+ 'target-id',
+]
+
+
+def _prepare_relation_data(unit, rid):
+ relation_data = relation_get(unit=unit, rid=rid)
+
+ if not relation_data:
+ msg = (
+ 'no relation data found for unit {} in relation {} - '
+ 'skipping'.format(unit, rid)
+ )
+ log(msg, level=DEBUG)
+ return {}
+
+ if rid.split(':')[0] == 'nagios':
+ # Fake it for the more generic 'nagios' relation
+ relation_data['target-id'] = unit.replace('/', '-')
+ relation_data['monitors'] = {'monitors': {'remote': {}}}
+
+ if not relation_data.get('target-address'):
+ relation_data['target-address'] = ingress_address(unit=unit, rid=rid)
+
+ for key in REQUIRED_REL_DATA_KEYS:
+ if not relation_data.get(key):
+ msg = (
+ '{} not found for unit {} in relation {} - '
+ 'skipping'.format(key, unit, rid)
+ )
+ log(msg, level=DEBUG)
+ return {}
+
+ return relation_data
+
+
+def _collect_relation_data():
+ all_relations = defaultdict(dict)
+ for relname in ['nagios', 'monitors']:
+ for relid in relation_ids(relname):
+ for unit in related_units(relid):
+ relation_data = _prepare_relation_data(unit=unit, rid=relid)
+ if relation_data:
+ all_relations[relid][unit] = relation_data
+
+ return all_relations
def main(argv):
@@ -43,35 +103,7 @@ def main(argv):
relation_settings['target-address'] = argv[3]
all_relations = {'monitors:99': {'testing/0': relation_settings}}
else:
- all_relations = {}
- for relid in get_valid_relations():
- (relname, relnum) = relid.split(':')
- for unit in get_valid_units(relid):
- relation_settings = json.loads(
- subprocess.check_output(['relation-get', '--format=json',
- '-r', relid,
- '-',unit]).strip())
-
- if relation_settings is None or relation_settings == '':
- continue
-
- if relname == 'monitors':
- if ('monitors' not in relation_settings
- or 'target-id' not in relation_settings):
- continue
- if ('target-id' in relation_settings and 'target-address' not in relation_settings):
- relation_settings['target-address'] = get_local_ingress_address('monitors')
-
- else:
- # Fake it for the more generic 'nagios' relation'
- relation_settings['target-id'] = unit.replace('/','-')
- relation_settings['target-address'] = get_local_ingress_address('monitors')
- relation_settings['monitors'] = {'monitors': {'remote': {} } }
-
- if relid not in all_relations:
- all_relations[relid] = {}
-
- all_relations[relid][unit] = relation_settings
+ all_relations = _collect_relation_data()
# Hack to work around http://pad.lv/1025478
targets_with_addresses = set()
Follow ups