cf-charmers team mailing list archive
-
cf-charmers team
-
Mailing list archive
-
Message #00381
[Merge] lp:~johnsca/charms/trusty/cf-loggregator/port-conflicts into lp:~cf-charmers/charms/trusty/cf-loggregator/trunk
Cory Johns has proposed merging lp:~johnsca/charms/trusty/cf-loggregator/port-conflicts into lp:~cf-charmers/charms/trusty/cf-loggregator/trunk.
Requested reviews:
Cloud Foundry Charmers (cf-charmers)
For more details, see:
https://code.launchpad.net/~johnsca/charms/trusty/cf-loggregator/port-conflicts/+merge/222675
Resolve port conflicts for cf-loggregator
https://codereview.appspot.com/103250045/
--
https://code.launchpad.net/~johnsca/charms/trusty/cf-loggregator/port-conflicts/+merge/222675
Your team Cloud Foundry Charmers is requested to review the proposed merge of lp:~johnsca/charms/trusty/cf-loggregator/port-conflicts into lp:~cf-charmers/charms/trusty/cf-loggregator/trunk.
=== modified file 'config.yaml'
--- config.yaml 2014-06-03 04:16:58 +0000
+++ config.yaml 2014-06-10 15:59:18 +0000
@@ -1,16 +1,10 @@
options:
- max-retained-logs:
+ max_retained_logs:
type: int
default: 20
description: |
Max lines of log output to buffer. If you wish to keep logs in a scalable
way sync them to a real logging backend.
- client_secret:
- type: string
- description: |
- The shared-secret for log clients to use when publishing log
- messages.
- default: 17b6cc14-eea8-433e-b8b7-a65ff8941e3b
source:
type: string
default: 'ppa:cf-charm/ppa'
@@ -30,3 +24,15 @@
description: |
Key ID to import to the apt keyring to support use with arbitrary source
configuration from outside of Launchpad archives or PPAs.
+ incoming_port:
+ type: int
+ default: 3456
+ description: 'Port to accept log message emitters on'
+ outgoing_port:
+ type: int
+ default: 8080
+ description: 'Port to accept log message consumers on'
+ varz_port:
+ type: int
+ default: 8888
+ description: 'Port for varz endpoint to listen on'
=== modified file 'hooks/charmhelpers/contrib/cloudfoundry/contexts.py'
--- hooks/charmhelpers/contrib/cloudfoundry/contexts.py 2014-05-27 22:03:37 +0000
+++ hooks/charmhelpers/contrib/cloudfoundry/contexts.py 2014-06-10 15:59:18 +0000
@@ -33,7 +33,7 @@
class NatsRelation(RelationContext):
interface = 'nats'
- required_keys = ['nats_port', 'nats_address', 'nats_user', 'nats_password']
+ required_keys = ['address', 'port', 'user', 'password']
class MysqlRelation(RelationContext):
@@ -44,9 +44,10 @@
def get_data(self):
RelationContext.get_data(self)
if self.is_ready():
- if 'port' not in self['db']:
- self['db']['port'] = '3306'
- self['db']['dsn'] = self.dsn_template.format(**self['db'])
+ for unit in self['db']:
+ if 'port' not in unit:
+ unit['port'] = '3306'
+ unit['dsn'] = self.dsn_template.format(**unit)
class RouterRelation(RelationContext):
@@ -56,9 +57,19 @@
class LogRouterRelation(RelationContext):
interface = 'logrouter'
- required_keys = ['shared-secret', 'logrouter-address']
+ required_keys = ['shared_secret', 'address', 'incoming_port', 'outgoing_port']
class LoggregatorRelation(RelationContext):
interface = 'loggregator'
- required_keys = ['shared_secret', 'loggregator_address']
+ required_keys = ['address', 'incoming_port', 'outgoing_port']
+
+
+class EtcdRelation(RelationContext):
+ interface = 'etcd'
+ required_keys = ['hostname', 'port']
+
+
+class CloudControllerRelation(RelationContext):
+ interface = 'cc'
+ required_keys = ['hostname', 'port', 'user', 'password']
=== modified file 'hooks/charmhelpers/core/services.py'
--- hooks/charmhelpers/core/services.py 2014-05-29 17:28:42 +0000
+++ hooks/charmhelpers/core/services.py 2014-06-10 15:59:18 +0000
@@ -11,6 +11,14 @@
"""
Register a list of services, given their definitions.
+ Traditional charm authoring is focused on implementing hooks. That is,
+ the charm author is thinking in terms of "What hook am I handling; what
+ does this hook need to do?" However, in most cases, the real question
+ should be "Do I have the information I need to configure and start this
+ piece of software and, if so, what are the steps for doing so." The
+ ServiceManager framework tries to bring the focus to the data and the
+ setup tasks, in the most declarative way possible.
+
Service definitions are dicts in the following formats (all keys except
'service' are optional):
@@ -67,28 +75,28 @@
a mongodb relation and which runs a custom `db_migrate` function prior to
restarting the service, and a Runit service called spadesd.
- >>> manager = services.ServiceManager([
- ... {
- ... 'service': 'bingod',
- ... 'ports': [80, 443],
- ... 'required_data': [MongoRelation(), config()],
- ... 'data_ready': [
- ... services.template(source='bingod.conf'),
- ... services.template(source='bingod.ini',
- ... target='/etc/bingod.ini',
- ... owner='bingo', perms=0400),
- ... ],
- ... },
- ... {
- ... 'service': 'spadesd',
- ... 'data_ready': services.template(source='spadesd_run.j2',
- ... target='/etc/sv/spadesd/run',
- ... perms=0555),
- ... 'start': runit_start,
- ... 'stop': runit_stop,
- ... },
- ... ])
- ... manager.manage()
+ manager = services.ServiceManager([
+ {
+ 'service': 'bingod',
+ 'ports': [80, 443],
+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
+ 'data_ready': [
+ services.template(source='bingod.conf'),
+ services.template(source='bingod.ini',
+ target='/etc/bingod.ini',
+ owner='bingo', perms=0400),
+ ],
+ },
+ {
+ 'service': 'spadesd',
+ 'data_ready': services.template(source='spadesd_run.j2',
+ target='/etc/sv/spadesd/run',
+ perms=0555),
+ 'start': runit_start,
+ 'stop': runit_stop,
+ },
+ ])
+ manager.manage()
"""
self.services = {}
for service in services or []:
@@ -213,55 +221,73 @@
interface = None
required_keys = []
+ def __init__(self, *args, **kwargs):
+ super(RelationContext, self).__init__(*args, **kwargs)
+ self.get_data()
+
def __bool__(self):
"""
- Updates the data and returns True if all of the required_keys are available.
+ Returns True if all of the required_keys are available.
"""
- self.get_data()
return self.is_ready()
__nonzero__ = __bool__
+ def __repr__(self):
+ return super(RelationContext, self).__repr__()
+
def is_ready(self):
"""
- Returns True if all of the required_keys are available.
- """
- return set(self.get(self.interface, {}).keys()).issuperset(set(self.required_keys))
+ Returns True if all of the `required_keys` are available from any units.
+ """
+ ready = len(self.get(self.interface, [])) > 0
+ if not ready:
+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+ return ready
+
+ def _is_ready(self, unit_data):
+ """
+ Helper method that tests a set of relation data and returns True if
+ all of the `required_keys` are present.
+ """
+ return set(unit_data.keys()).issuperset(set(self.required_keys))
def get_data(self):
"""
- Retrieve the relation data and store it under `self[self.interface]`.
-
- If there are more than one units related on the desired interface,
- then each unit will have its data stored under `self[self.interface][unit_id]`
- and one of the units with complete information will be chosen at random
- to fill the values at `self[self.interface]`.
-
-
- For example:
-
- {
- 'foo': 'bar',
- 'unit/0': {
- 'foo': 'bar',
- },
- 'unit/1': {
- 'foo': 'baz',
- },
- }
+ Retrieve the relation data for each unit involved in a relation and,
+ if complete, store it in a list under `self[self.interface]`. This
+ is automatically called when the RelationContext is instantiated.
+
+ The units are sorted lexicographically first by the service ID, then by
+ the unit ID. Thus, if an interface has two other services, 'db:1'
+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+ set of data, the relation data for the units will be stored in the
+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+ If you only care about a single unit on the relation, you can just
+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
+ support multiple units on a relation, you should iterate over the list,
+ like:
+
+ {% for unit in interface -%}
+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
+ {%- endfor %}
+
+ Note that since all sets of relation data from all related services and
+ units are in a single list, if you need to know which service or unit a
+ set of data came from, you'll need to extend this class to preserve
+ that information.
"""
if not hookenv.relation_ids(self.interface):
return
- ns = self.setdefault(self.interface, {})
- required = set(self.required_keys)
- for rid in hookenv.relation_ids(self.interface):
- for unit in hookenv.related_units(rid):
+ ns = self.setdefault(self.interface, [])
+ for rid in sorted(hookenv.relation_ids(self.interface)):
+ for unit in sorted(hookenv.related_units(rid)):
reldata = hookenv.relation_get(rid=rid, unit=unit)
- unit_ns = ns.setdefault(unit, {})
- unit_ns.update(reldata)
- if set(reldata.keys()).issuperset(required):
- ns.update(reldata)
+ if self._is_ready(reldata):
+ ns.append(reldata)
class ManagerCallback(object):
@@ -316,6 +342,6 @@
# Convenience aliases
-template = TemplateCallback
+render_template = template = TemplateCallback
open_ports = PortManagerCallback()
close_ports = PortManagerCallback()
=== modified file 'hooks/config.py'
--- hooks/config.py 2014-05-29 17:28:42 +0000
+++ hooks/config.py 2014-06-10 15:59:18 +0000
@@ -19,9 +19,12 @@
{
'service': LOGGREGATOR_JOB_NAME,
'required_data': [
- {'service_name': os.environ['JUJU_UNIT_NAME'].split('/')[0]},
- hookenv.config(),
contexts.NatsRelation(),
+ contexts.LogRouterRelation(),
+ {
+ 'service_name': os.environ['JUJU_UNIT_NAME'].split('/')[0],
+ 'config': hookenv.config(),
+ },
],
'data_ready': [
services.template(source='loggregator.conf',
=== modified file 'hooks/loggregator-relation-changed'
--- hooks/loggregator-relation-changed 2014-05-27 22:03:37 +0000
+++ hooks/loggregator-relation-changed 2014-06-10 15:59:18 +0000
@@ -2,7 +2,8 @@
from charmhelpers.core import hookenv
config = hookenv.config()
-loggregator_address = hookenv.unit_get('private-address').encode('utf-8')
hookenv.relation_set(None, {
- 'shared_secret': config['client_secret'],
- 'loggregator_address': loggregator_address})
+ 'address': hookenv.unit_get('private-address').encode('utf-8'),
+ 'incoming_port': config['incoming_port'],
+ 'outgoing_port': config['outgoing_port'],
+})
=== modified file 'metadata.yaml'
--- metadata.yaml 2014-05-12 11:12:06 +0000
+++ metadata.yaml 2014-06-10 15:59:18 +0000
@@ -12,5 +12,5 @@
requires:
nats:
interface: nats
- # logrouter:
- # interface: logrouter
\ No newline at end of file
+ logrouter:
+ interface: logrouter
=== modified file 'templates/loggregator.json'
--- templates/loggregator.json 2014-04-28 12:57:31 +0000
+++ templates/loggregator.json 2014-06-10 15:59:18 +0000
@@ -1,16 +1,16 @@
{
+ "IncomingPort": {{ config['incoming_port'] }},
+ "OutgoingPort": {{ config['outgoing_port'] }},
"Index": 0,
- "NatsHost": "{{ nats['nats_address'] }}",
- "VarzPort": 8888,
"SkipCertVerify": false,
+ "MaxRetainedLogMessages": {{ config['max_retained_logs'] }},
+ "Syslog": "",
+ "SharedSecret": "{{ logrouter[0]['shared_secret'] }}",
"VarzUser": "{{ service_name }}",
- "MaxRetainedLogMessages": 20,
- "OutgoingPort": 8080,
- "Syslog": "",
"VarzPass": "{{ service_name }}",
- "NatsUser": "{{ nats['nats_user'] }}",
- "NatsPass": "{{ nats['nats_password'] }}",
- "IncomingPort": 3456,
- "NatsPort": {{ nats['nats_port'] }},
- "SharedSecret": "{{ client_secret }}"
-}
\ No newline at end of file
+ "VarzPort": {{ config['varz_port'] }},
+ "NatsHost": "{{ nats[0]['address'] }}",
+ "NatsUser": "{{ nats[0]['user'] }}",
+ "NatsPass": "{{ nats[0]['password'] }}",
+ "NatsPort": {{ nats[0]['port'] }}
+}
References