cf-charmers team mailing list archive
-
cf-charmers team
-
Mailing list archive
-
Message #00366
[Merge] lp:~johnsca/charms/trusty/cf-dea/port-conflicts into lp:~cf-charmers/charms/trusty/cf-dea/trunk
Cory Johns has proposed merging lp:~johnsca/charms/trusty/cf-dea/port-conflicts into lp:~cf-charmers/charms/trusty/cf-dea/trunk.
Requested reviews:
Cloud Foundry Charmers (cf-charmers)
For more details, see:
https://code.launchpad.net/~johnsca/charms/trusty/cf-dea/port-conflicts/+merge/222670
Resolve port conflicts in cf-dea
https://codereview.appspot.com/107890043/
--
https://code.launchpad.net/~johnsca/charms/trusty/cf-dea/port-conflicts/+merge/222670
Your team Cloud Foundry Charmers is requested to review the proposed merge of lp:~johnsca/charms/trusty/cf-dea/port-conflicts into lp:~cf-charmers/charms/trusty/cf-dea/trunk.
=== modified file 'config.yaml'
--- config.yaml 2014-05-12 07:18:47 +0000
+++ config.yaml 2014-06-10 15:53:18 +0000
@@ -1,5 +1,5 @@
options:
- warden_image_url:
+ warden_image_url:
type: string
# Fetched from: https://github.com/cloudfoundry/cf-release/blob/v153/config/blobs.yml
default: "http://blob.cfblob.com/rest/objects/4e4e78bca61e122204e4e98643d9ae051782c701f072"
@@ -23,4 +23,7 @@
description: |
Key ID to import to the apt keyring to support use with arbitrary source
configuration from outside of Launchpad archives or PPA's.
-
+ varz_port:
+ type: int
+ default: 8888
+ description: 'Port for varz endpoint to listen on'
=== modified file 'hooks/charmhelpers/contrib/cloudfoundry/contexts.py'
--- hooks/charmhelpers/contrib/cloudfoundry/contexts.py 2014-05-27 19:50:27 +0000
+++ hooks/charmhelpers/contrib/cloudfoundry/contexts.py 2014-06-10 15:53:18 +0000
@@ -33,7 +33,7 @@
class NatsRelation(RelationContext):
interface = 'nats'
- required_keys = ['nats_port', 'nats_address', 'nats_user', 'nats_password']
+ required_keys = ['address', 'port', 'user', 'password']
class MysqlRelation(RelationContext):
@@ -44,9 +44,10 @@
def get_data(self):
RelationContext.get_data(self)
if self.is_ready():
- if 'port' not in self['db']:
- self['db']['port'] = '3306'
- self['db']['dsn'] = self.dsn_template.format(**self['db'])
+ for unit in self['db']:
+ if 'port' not in unit:
+ unit['port'] = '3306'
+ unit['dsn'] = self.dsn_template.format(**unit)
class RouterRelation(RelationContext):
@@ -56,9 +57,19 @@
class LogRouterRelation(RelationContext):
interface = 'logrouter'
- required_keys = ['shared-secret', 'logrouter-address']
+ required_keys = ['shared_secret', 'address', 'incoming_port', 'outgoing_port']
class LoggregatorRelation(RelationContext):
interface = 'loggregator'
- required_keys = ['shared_secret', 'loggregator_address']
+ required_keys = ['address', 'incoming_port', 'outgoing_port']
+
+
+class EtcdRelation(RelationContext):
+ interface = 'etcd'
+ required_keys = ['hostname', 'port']
+
+
+class CloudControllerRelation(RelationContext):
+ interface = 'cc'
+ required_keys = ['hostname', 'port', 'user', 'password']
=== modified file 'hooks/charmhelpers/core/services.py'
--- hooks/charmhelpers/core/services.py 2014-05-27 19:50:27 +0000
+++ hooks/charmhelpers/core/services.py 2014-06-10 15:53:18 +0000
@@ -11,6 +11,14 @@
"""
Register a list of services, given their definitions.
+ Traditional charm authoring is focused on implementing hooks. That is,
+ the charm author is thinking in terms of "What hook am I handling; what
+ does this hook need to do?" However, in most cases, the real question
+ should be "Do I have the information I need to configure and start this
+ piece of software and, if so, what are the steps for doing so." The
+ ServiceManager framework tries to bring the focus to the data and the
+ setup tasks, in the most declarative way possible.
+
Service definitions are dicts in the following formats (all keys except
'service' are optional):
@@ -21,6 +29,7 @@
"data_lost": <one or more callbacks>,
"start": <one or more callbacks>,
"stop": <one or more callbacks>,
+ "ports": <list of ports to manage>,
}
The 'required_data' list should contain dicts of required data (or
@@ -44,14 +53,20 @@
The 'start' value should be either a single callback, or a list of
callbacks, to be called when starting the service, after the 'data_ready'
callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to `host.service_start`.
+ name as the only parameter. This defaults to
+ `[host.service_start, services.open_ports]`.
The 'stop' value should be either a single callback, or a list of
callbacks, to be called when stopping the service. If the service is
being stopped because it no longer has all of its 'required_data', this
will be called after all of the 'data_lost' callbacks are complete.
Each callback will be called with the service name as the only parameter.
- This defaults to `host.service_stop`.
+ This defaults to `[services.close_ports, host.service_stop]`.
+
+ The 'ports' value should be a list of ports to manage. The default
+ 'start' handler will open the ports after the service is started,
+ and the default 'stop' handler will close the ports prior to stopping
+ the service.
Examples:
@@ -60,27 +75,28 @@
a mongodb relation and which runs a custom `db_migrate` function prior to
restarting the service, and a Runit service called spadesd.
- >>> manager = services.ServiceManager([
- ... {
- ... 'service': 'bingod',
- ... 'required_data': [MongoRelation(), config()],
- ... 'data_ready': [
- ... services.template(source='bingod.conf'),
- ... services.template(source='bingod.ini',
- ... target='/etc/bingod.ini',
- ... owner='bingo', perms=0400),
- ... ],
- ... },
- ... {
- ... 'service': 'spadesd',
- ... 'data_ready': services.template(source='spadesd_run.j2',
- ... target='/etc/sv/spadesd/run',
- ... perms=0555),
- ... 'start': runit_start,
- ... 'stop': runit_stop,
- ... },
- ... ])
- ... manager.manage()
+ manager = services.ServiceManager([
+ {
+ 'service': 'bingod',
+ 'ports': [80, 443],
+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
+ 'data_ready': [
+ services.template(source='bingod.conf'),
+ services.template(source='bingod.ini',
+ target='/etc/bingod.ini',
+ owner='bingo', perms=0400),
+ ],
+ },
+ {
+ 'service': 'spadesd',
+ 'data_ready': services.template(source='spadesd_run.j2',
+ target='/etc/sv/spadesd/run',
+ perms=0555),
+ 'start': runit_start,
+ 'stop': runit_stop,
+ },
+ ])
+ manager.manage()
"""
self.services = {}
for service in services or []:
@@ -107,12 +123,16 @@
for service_name in service_names or self.services.keys():
if self.is_ready(service_name):
self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=host.service_restart)
+ self.fire_event('start', service_name, default=[
+ host.service_restart,
+ open_ports])
self.save_ready(service_name)
else:
if self.was_ready(service_name):
self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=host.service_stop)
+ self.fire_event('stop', service_name, default=[
+ close_ports,
+ host.service_stop])
self.save_lost(service_name)
def stop_services(self, *service_names):
@@ -122,7 +142,9 @@
If no service names are given, stops all registered services.
"""
for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=host.service_stop)
+ self.fire_event('stop', service_name, default=[
+ close_ports,
+ host.service_stop])
def get_service(self, service_name):
"""
@@ -145,7 +167,7 @@
callbacks = [callbacks]
for callback in callbacks:
if isinstance(callback, ManagerCallback):
- callback(self, service_name)
+ callback(self, service_name, event_name)
else:
callback(service_name)
@@ -199,55 +221,73 @@
interface = None
required_keys = []
+ def __init__(self, *args, **kwargs):
+ super(RelationContext, self).__init__(*args, **kwargs)
+ self.get_data()
+
def __bool__(self):
"""
- Updates the data and returns True if all of the required_keys are available.
+ Returns True if all of the required_keys are available.
"""
- self.get_data()
return self.is_ready()
__nonzero__ = __bool__
+ def __repr__(self):
+ return super(RelationContext, self).__repr__()
+
def is_ready(self):
"""
- Returns True if all of the required_keys are available.
- """
- return set(self.get(self.interface, {}).keys()).issuperset(set(self.required_keys))
+ Returns True if all of the `required_keys` are available from any units.
+ """
+ ready = len(self.get(self.interface, [])) > 0
+ if not ready:
+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+ return ready
+
+ def _is_ready(self, unit_data):
+ """
+ Helper method that tests a set of relation data and returns True if
+ all of the `required_keys` are present.
+ """
+ return set(unit_data.keys()).issuperset(set(self.required_keys))
def get_data(self):
"""
- Retrieve the relation data and store it under `self[self.interface]`.
-
- If there are more than one units related on the desired interface,
- then each unit will have its data stored under `self[self.interface][unit_id]`
- and one of the units with complete information will chosen at random
- to fill the values at `self[self.interface]`.
-
-
- For example:
-
- {
- 'foo': 'bar',
- 'unit/0': {
- 'foo': 'bar',
- },
- 'unit/1': {
- 'foo': 'baz',
- },
- }
+ Retrieve the relation data for each unit involved in a relation and,
+ if complete, store it in a list under `self[self.interface]`. This
+ is automatically called when the RelationContext is instantiated.
+
+ The units are sorted lexicographically first by the service ID, then by
+ the unit ID. Thus, if an interface has two other services, 'db:1'
+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+ set of data, the relation data for the units will be stored in the
+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+ If you only care about a single unit on the relation, you can just
+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
+ support multiple units on a relation, you should iterate over the list,
+ like:
+
+ {% for unit in interface -%}
+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
+ {%- endfor %}
+
+ Note that since all sets of relation data from all related services and
+ units are in a single list, if you need to know which service or unit a
+ set of data came from, you'll need to extend this class to preserve
+ that information.
"""
if not hookenv.relation_ids(self.interface):
return
- ns = self.setdefault(self.interface, {})
- required = set(self.required_keys)
- for rid in hookenv.relation_ids(self.interface):
- for unit in hookenv.related_units(rid):
+ ns = self.setdefault(self.interface, [])
+ for rid in sorted(hookenv.relation_ids(self.interface)):
+ for unit in sorted(hookenv.related_units(rid)):
reldata = hookenv.relation_get(rid=rid, unit=unit)
- unit_ns = ns.setdefault(unit, {})
- unit_ns.update(reldata)
- if set(reldata.keys()).issuperset(required):
- ns.update(reldata)
+ if self._is_ready(reldata):
+ ns.append(reldata)
class ManagerCallback(object):
@@ -259,38 +299,49 @@
* `manager` The `ServiceManager` instance
* `service_name` The name of the service it's being triggered for
+ * `event_name` The name of the event that this callback is handling
"""
- def __call__(self, manager, service_name):
+ def __call__(self, manager, service_name, event_name):
raise NotImplementedError()
class TemplateCallback(ManagerCallback):
"""
- Create a callback that will render a template, for use as a ready action.
+ Callback class that will render a template, for use as a ready action.
The `target` param, if omitted, will default to `/etc/init/<service name>`.
"""
- def __init__(self, source, target=None, owner='root', group='root', perms=0444):
+ def __init__(self, source, target, owner='root', group='root', perms=0444):
self.source = source
self.target = target
self.owner = owner
self.group = group
self.perms = perms
- def __call__(self, manager, service_name):
+ def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
context = {}
for ctx in service.get('required_data', []):
context.update(ctx)
- target = self.target
- if target is None:
- target = '/etc/init/{}.conf'.format(service_name)
- templating.render(self.source, target, context,
+ templating.render(self.source, self.target, context,
self.owner, self.group, self.perms)
-def template(source, target=None, owner='root', group='root', perms=0444):
- """
- Helper factory for TemplateCallback.
- """
- return TemplateCallback(source, target, owner, group, perms)
+class PortManagerCallback(ManagerCallback):
+ """
+ Callback class that will open or close ports, for use as either
+ a start or stop action.
+ """
+ def __call__(self, manager, service_name, event_name):
+ service = manager.get_service(service_name)
+ for port in service.get('ports', []):
+ if event_name == 'start':
+ hookenv.open_port(port)
+ elif event_name == 'stop':
+ hookenv.close_port(port)
+
+
+# Convenience aliases
+render_template = template = TemplateCallback
+open_ports = PortManagerCallback()
+close_ports = PortManagerCallback()
=== modified file 'hooks/config.py'
--- hooks/config.py 2014-06-03 03:50:30 +0000
+++ hooks/config.py 2014-06-10 15:53:18 +0000
@@ -1,4 +1,5 @@
import os
+from charmhelpers.core import hookenv
from charmhelpers.core import services
from charmhelpers.contrib.cloudfoundry import contexts
@@ -61,7 +62,8 @@
{
'service': 'cf-dea-logging-agent',
'required_data': [contexts.NatsRelation(),
- contexts.LoggregatorRelation()],
+ contexts.LogRouterRelation(),
+ {'config': hookenv.config()}],
'data_ready': [
services.template(source='cf-dea-logging-agent.conf',
target='/etc/init/cf-dea-logging-agent.conf'),
=== modified file 'metadata.yaml'
--- metadata.yaml 2014-05-12 11:00:05 +0000
+++ metadata.yaml 2014-06-10 15:53:18 +0000
@@ -10,5 +10,5 @@
interface: nats
router:
interface: router
- loggregator:
- interface: loggregator
+ logrouter:
+ interface: logrouter
=== modified file 'templates/dea.yml'
--- templates/dea.yml 2014-04-13 00:22:46 +0000
+++ templates/dea.yml 2014-06-10 15:53:18 +0000
@@ -11,8 +11,7 @@
disk_overcommit_factor: 2
nats_servers:
- - nats://{{ nats["nats_user"] }}:{{ nats["nats_password"] }}@{{ nats["nats_address"] }}:{{ nats["nats_port"] }}/
-# nats_uri: nats://admin:admin@<nats-unit-public-address>:4022/
+ - nats://{{ nats[0]["user"] }}:{{ nats[0]["password"] }}@{{ nats[0]["address"] }}:{{ nats[0]["port"] }}/
pid_filename: /var/lib/cloudfoundry/cfdea/pids/dea_ng.pid
@@ -23,7 +22,7 @@
default_health_check_timeout: 60 # 1 minute
index: 0
-domain: {{ router["domain"] }}
+domain: {{ router[0]["domain"] }}
staging:
enabled: true
=== modified file 'templates/dea_logging_agent.json'
--- templates/dea_logging_agent.json 2014-05-07 11:50:56 +0000
+++ templates/dea_logging_agent.json 2014-06-10 15:53:18 +0000
@@ -1,13 +1,13 @@
{
"Index": 0,
- "LoggregatorAddress": "{{ loggregator['loggregator_address'] }}:3456",
- "SharedSecret": "{{ loggregator['shared_secret'] }}",
- "NatsHost": "{{ nats['nats_address'] }}",
- "NatsPort": {{ nats['nats_port'] }},
- "NatsUser": "{{ nats['nats_user'] }}",
- "NatsPass": "{{ nats['nats_password'] }}",
+ "LoggregatorAddress": "{{ logrouter[0]['address'] }}:{{ logrouter[0]['port'] }}",
+ "SharedSecret": "{{ logrouter[0]['shared_secret'] }}",
+ "NatsHost": "{{ nats[0]['address'] }}",
+ "NatsPort": {{ nats[0]['port'] }},
+ "NatsUser": "{{ nats[0]['user'] }}",
+ "NatsPass": "{{ nats[0]['password'] }}",
"VarzUser": "varz",
"VarzPass": "password",
- "VarzPort": 8888,
+ "VarzPort": {{ config['varz_port'] }},
"Syslog" : ""
}
References