cf-charmers team mailing list archive: Message #00376
[Merge] lp:~johnsca/charms/trusty/cf-hm9000/port-conflicts into lp:~cf-charmers/charms/trusty/cf-hm9000/trunk
Cory Johns has proposed merging lp:~johnsca/charms/trusty/cf-hm9000/port-conflicts into lp:~cf-charmers/charms/trusty/cf-hm9000/trunk.
Requested reviews:
Cloud Foundry Charmers (cf-charmers)
For more details, see:
https://code.launchpad.net/~johnsca/charms/trusty/cf-hm9000/port-conflicts/+merge/222673
Resolve port conflicts in cf-hm9000
https://codereview.appspot.com/108840044/
--
https://code.launchpad.net/~johnsca/charms/trusty/cf-hm9000/port-conflicts/+merge/222673
Your team Cloud Foundry Charmers is requested to review the proposed merge of lp:~johnsca/charms/trusty/cf-hm9000/port-conflicts into lp:~cf-charmers/charms/trusty/cf-hm9000/trunk.
=== removed file 'files/README.md'
--- files/README.md 2014-05-14 16:40:09 +0000
+++ files/README.md 1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@
-# Contents
-
-ctl files and config from cf-release
=== removed file 'files/default-config.json'
--- files/default-config.json 2014-05-14 16:40:09 +0000
+++ files/default-config.json 1970-01-01 00:00:00 +0000
@@ -1,27 +0,0 @@
-{
- "heartbeat_period_in_seconds": 10,
-
- "cc_auth_user": "mcat",
- "cc_auth_password": "testing",
- "cc_base_url": "http://127.0.0.1:6001",
- "skip_cert_verify": true,
- "desired_state_batch_size": 500,
- "fetcher_network_timeout_in_seconds": 10,
-
- "store_schema_version": 1,
- "store_type": "etcd",
- "store_urls": ["http://127.0.0.1:4001"],
-
- "metrics_server_port": 7879,
- "metrics_server_user": "metrics_server_user",
- "metrics_server_password": "canHazMetrics?",
-
- "log_level": "INFO",
-
- "nats": [{
- "host": "127.0.0.1",
- "port": 4222,
- "user": "",
- "password": ""
- }]
-}
=== added file 'files/hm9000'
Binary files files/hm9000 1970-01-01 00:00:00 +0000 and files/hm9000 2014-06-10 15:56:14 +0000 differ
=== removed file 'files/hm9000.json.erb'
--- files/hm9000.json.erb 2014-05-14 16:40:09 +0000
+++ files/hm9000.json.erb 1970-01-01 00:00:00 +0000
@@ -1,30 +0,0 @@
-{
- "heartbeat_period_in_seconds": 10,
-
- "cc_auth_user": "<%= p("ccng.bulk_api_user") %>",
- "cc_auth_password": "<%= p("ccng.bulk_api_password") %>",
- "cc_base_url": "<%= p("cc.srv_api_uri") %>",
- "skip_cert_verify": <%= p("ssl.skip_cert_verify") %>,
- "desired_state_batch_size": 500,
- "fetcher_network_timeout_in_seconds": 10,
-
- "store_schema_version": 4,
- "store_urls": [<%= p("etcd.machines").map{|addr| "\"http://#{addr}:4001\""}.join(",")%>],
-
- "metrics_server_port": 0,
- "metrics_server_user": "",
- "metrics_server_password": "",
-
- "log_level": "INFO",
-
- "nats": <%=
- p("nats.machines").collect do |addr|
- {
- "host" => addr,
- "port" => p("nats.port"),
- "user" => p("nats.user"),
- "password" => p("nats.password")
- }
- end.to_json
-%>
-}
=== removed file 'files/hm9000_analyzer_ctl'
--- files/hm9000_analyzer_ctl 2014-05-14 16:40:09 +0000
+++ files/hm9000_analyzer_ctl 1970-01-01 00:00:00 +0000
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-RUN_DIR=/var/vcap/sys/run/hm9000
-LOG_DIR=/var/vcap/sys/log/hm9000
-PIDFILE=$RUN_DIR/hm9000_analyzer.pid
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- pid_guard $PIDFILE "hm9000_analyzer"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- chown -R vcap:vcap $RUN_DIR
- chown -R vcap:vcap $LOG_DIR
-
- echo $$ > $PIDFILE
-
- exec chpst -u vcap:vcap /var/vcap/packages/hm9000/hm9000 \
- analyze \
- --config=/var/vcap/jobs/hm9000/config/hm9000.json \
- --poll \
- 1>>$LOG_DIR/hm9000_analyzer.stdout.log \
- 2>>$LOG_DIR/hm9000_analyzer.stderr.log
-
- ;;
-
- stop)
- kill_and_wait $PIDFILE
-
- ;;
-
- *)
- echo "Usage: hm9000_analyzer_ctl {start|stop}"
-
- ;;
-
-esac
=== removed file 'files/hm9000_api_server_ctl'
--- files/hm9000_api_server_ctl 2014-05-14 16:40:09 +0000
+++ files/hm9000_api_server_ctl 1970-01-01 00:00:00 +0000
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-RUN_DIR=/var/vcap/sys/run/hm9000
-LOG_DIR=/var/vcap/sys/log/hm9000
-PIDFILE=$RUN_DIR/hm9000_api_server.pid
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- pid_guard $PIDFILE "hm9000_api_server"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- chown -R vcap:vcap $RUN_DIR
- chown -R vcap:vcap $LOG_DIR
-
- echo $$ > $PIDFILE
-
- exec chpst -u vcap:vcap /var/vcap/packages/hm9000/hm9000 \
- serve_api \
- --config=/var/vcap/jobs/hm9000/config/hm9000.json \
- 1>>$LOG_DIR/hm9000_api_server.stdout.log \
- 2>>$LOG_DIR/hm9000_api_server.stderr.log
-
- ;;
-
- stop)
- kill_and_wait $PIDFILE
-
- ;;
-
- *)
- echo "Usage: hm9000_api_server_ctl {start|stop}"
-
- ;;
-
-esac
=== removed file 'files/hm9000_evacuator_ctl'
--- files/hm9000_evacuator_ctl 2014-05-14 16:40:09 +0000
+++ files/hm9000_evacuator_ctl 1970-01-01 00:00:00 +0000
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-RUN_DIR=/var/vcap/sys/run/hm9000
-LOG_DIR=/var/vcap/sys/log/hm9000
-PIDFILE=$RUN_DIR/hm9000_evacuator.pid
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- pid_guard $PIDFILE "hm9000_evacuator"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- chown -R vcap:vcap $RUN_DIR
- chown -R vcap:vcap $LOG_DIR
-
- echo $$ > $PIDFILE
-
- exec chpst -u vcap:vcap /var/vcap/packages/hm9000/hm9000 \
- evacuator \
- --config=/var/vcap/jobs/hm9000/config/hm9000.json \
- 1>>$LOG_DIR/hm9000_evacuator.stdout.log \
- 2>>$LOG_DIR/hm9000_evacuator.stderr.log
-
- ;;
-
- stop)
- kill_and_wait $PIDFILE
-
- ;;
-
- *)
- echo "Usage: hm9000_evacuator_ctl {start|stop}"
-
- ;;
-
-esac
=== removed file 'files/hm9000_fetcher_ctl'
--- files/hm9000_fetcher_ctl 2014-05-14 16:40:09 +0000
+++ files/hm9000_fetcher_ctl 1970-01-01 00:00:00 +0000
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-RUN_DIR=/var/vcap/sys/run/hm9000
-LOG_DIR=/var/vcap/sys/log/hm9000
-PIDFILE=$RUN_DIR/hm9000_fetcher.pid
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- pid_guard $PIDFILE "hm9000_fetcher"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- chown -R vcap:vcap $RUN_DIR
- chown -R vcap:vcap $LOG_DIR
-
- echo $$ > $PIDFILE
-
- exec chpst -u vcap:vcap /var/vcap/packages/hm9000/hm9000 \
- fetch_desired \
- --config=/var/vcap/jobs/hm9000/config/hm9000.json \
- --poll \
- 1>>$LOG_DIR/hm9000_fetcher.stdout.log \
- 2>>$LOG_DIR/hm9000_fetcher.stderr.log
-
- ;;
-
- stop)
- kill_and_wait $PIDFILE
-
- ;;
-
- *)
- echo "Usage: hm9000_fetcher_ctl {start|stop}"
-
- ;;
-
-esac
=== removed file 'files/hm9000_listener_ctl'
--- files/hm9000_listener_ctl 2014-05-14 16:40:09 +0000
+++ files/hm9000_listener_ctl 1970-01-01 00:00:00 +0000
@@ -1,44 +0,0 @@
-#!/bin/bash
-
-RUN_DIR=/var/vcap/sys/run/hm9000
-LOG_DIR=/var/vcap/sys/log/hm9000
-PIDFILE=$RUN_DIR/hm9000_listener.pid
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- pid_guard $PIDFILE "hm9000_listener"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- chown -R vcap:vcap $RUN_DIR
- chown -R vcap:vcap $LOG_DIR
-
- <% if_p("syslog_aggregator") do %>
- /var/vcap/packages/syslog_aggregator/setup_syslog_forwarder.sh /var/vcap/jobs/hm9000/config
- <% end %>
-
- echo $$ > $PIDFILE
-
- exec chpst -u vcap:vcap /var/vcap/packages/hm9000/hm9000 \
- listen \
- --config=/var/vcap/jobs/hm9000/config/hm9000.json \
- 1>>$LOG_DIR/hm9000_listener.stdout.log \
- 2>>$LOG_DIR/hm9000_listener.stderr.log
-
- ;;
-
- stop)
- kill_and_wait $PIDFILE
-
- ;;
-
- *)
- echo "Usage: hm9000_listener_ctl {start|stop}"
-
- ;;
-
-esac
=== removed file 'files/hm9000_metrics_server_ctl'
--- files/hm9000_metrics_server_ctl 2014-05-14 16:40:09 +0000
+++ files/hm9000_metrics_server_ctl 1970-01-01 00:00:00 +0000
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-RUN_DIR=/var/vcap/sys/run/hm9000
-LOG_DIR=/var/vcap/sys/log/hm9000
-PIDFILE=$RUN_DIR/hm9000_metrics_server.pid
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- pid_guard $PIDFILE "hm9000_metrics_server"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- chown -R vcap:vcap $RUN_DIR
- chown -R vcap:vcap $LOG_DIR
-
- echo $$ > $PIDFILE
-
- exec chpst -u vcap:vcap /var/vcap/packages/hm9000/hm9000 \
- serve_metrics \
- --config=/var/vcap/jobs/hm9000/config/hm9000.json \
- 1>>$LOG_DIR/hm9000_metrics_server.stdout.log \
- 2>>$LOG_DIR/hm9000_metrics_server.stderr.log
-
- ;;
-
- stop)
- kill_and_wait $PIDFILE
-
- ;;
-
- *)
- echo "Usage: hm9000_metrics_server_ctl {start|stop}"
-
- ;;
-
-esac
=== removed file 'files/hm9000_sender_ctl'
--- files/hm9000_sender_ctl 2014-05-14 16:40:09 +0000
+++ files/hm9000_sender_ctl 1970-01-01 00:00:00 +0000
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-RUN_DIR=/var/vcap/sys/run/hm9000
-LOG_DIR=/var/vcap/sys/log/hm9000
-PIDFILE=$RUN_DIR/hm9000_sender.pid
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- pid_guard $PIDFILE "hm9000_sender"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- chown -R vcap:vcap $RUN_DIR
- chown -R vcap:vcap $LOG_DIR
-
- echo $$ > $PIDFILE
-
- exec chpst -u vcap:vcap /var/vcap/packages/hm9000/hm9000 \
- send \
- --config=/var/vcap/jobs/hm9000/config/hm9000.json \
- --poll \
- 1>>$LOG_DIR/hm9000_sender.stdout.log \
- 2>>$LOG_DIR/hm9000_sender.stderr.log
-
- ;;
-
- stop)
- kill_and_wait $PIDFILE
-
- ;;
-
- *)
- echo "Usage: hm9000_sender_ctl {start|stop}"
-
- ;;
-
-esac
=== removed file 'files/hm9000_shredder_ctl'
--- files/hm9000_shredder_ctl 2014-05-14 16:40:09 +0000
+++ files/hm9000_shredder_ctl 1970-01-01 00:00:00 +0000
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-RUN_DIR=/var/vcap/sys/run/hm9000
-LOG_DIR=/var/vcap/sys/log/hm9000
-PIDFILE=$RUN_DIR/hm9000_shredder.pid
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- pid_guard $PIDFILE "hm9000_shredder"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- chown -R vcap:vcap $RUN_DIR
- chown -R vcap:vcap $LOG_DIR
-
- echo $$ > $PIDFILE
-
- exec chpst -u vcap:vcap /var/vcap/packages/hm9000/hm9000 \
- shred \
- --config=/var/vcap/jobs/hm9000/config/hm9000.json \
- --poll \
- 1>>$LOG_DIR/hm9000_shredder.stdout.log \
- 2>>$LOG_DIR/hm9000_shredder.stderr.log
-
- ;;
-
- stop)
- kill_and_wait $PIDFILE
-
- ;;
-
- *)
- echo "Usage: hm9000_shredder_ctl {start|stop}"
-
- ;;
-
-esac
=== removed file 'files/syslog_forwarder.conf.erb'
--- files/syslog_forwarder.conf.erb 2014-05-14 16:40:09 +0000
+++ files/syslog_forwarder.conf.erb 1970-01-01 00:00:00 +0000
@@ -1,65 +0,0 @@
-<% if_p("syslog_aggregator.address", "syslog_aggregator.port", "syslog_aggregator.transport") do |address, port, transport| %>
-$ModLoad imuxsock # local message reception (rsyslog uses a datagram socket)
-$MaxMessageSize 4k # default is 2k
-$WorkDirectory /var/vcap/sys/rsyslog/buffered # where messages should be buffered on disk
-
-# Forward vcap messages to the aggregator
-#
-$ActionResumeRetryCount -1 # Try until the server becomes available
-$ActionQueueType LinkedList # Allocate on-demand
-$ActionQueueFileName agg_backlog # Spill to disk if queue is full
-$ActionQueueMaxDiskSpace 32m # Max size for disk queue
-$ActionQueueLowWaterMark 2000 # Num messages. Assuming avg size of 512B, this is 1MiB.
-$ActionQueueHighWaterMark 8000 # Num messages. Assuming avg size of 512B, this is 4MiB. (If this is reached, messages will spill to disk until the low watermark is reached).
-$ActionQueueTimeoutEnqueue 0 # Discard messages if the queue + disk is full
-$ActionQueueSaveOnShutdown on # Save in-memory data to disk if rsyslog shuts down
-
-<% ip = spec.networks.send(properties.networks.apps).ip %>
-template(name="CfLogTemplate" type="list") {
- constant(value="<")
- property(name="pri")
- constant(value=">")
- property(name="timestamp" dateFormat="rfc3339")
- constant(value=" <%= ip.strip %> ")
- property(name="programname")
- constant(value=" [job=")
- property(name="programname")
- constant(value=" index=<%= spec.index.to_i %>] ")
- property(name="msg")
-}
-
-<% if transport == "relp" %>
-$ModLoad omrelp
-:programname, startswith, "vcap." :omrelp:<%= address %>:<%= port %>;CfLogTemplate
-<% elsif transport == "udp" %>
-:programname, startswith, "vcap." @<%= address %>:<%= port %>;CfLogTemplate
-<% elsif transport == "tcp" %>
-:programname, startswith, "vcap." @@<%= address %>:<%= port %>;CfLogTemplate
-<% else %>
-#only RELP, UDP, and TCP are supported
-<% end %>
-
-# Log vcap messages locally, too
-#$template VcapComponentLogFile, "/var/log/%programname:6:$%/%programname:6:$%.log"
-#$template VcapComponentLogFormat, "%timegenerated% %syslogseverity-text% -- %msg%\n"
-#:programname, startswith, "vcap." -?VcapComponentLogFile;VcapComponentLogFormat
-
-# Prevent them from reaching anywhere else
-:programname, startswith, "vcap." ~
-
-<% if properties.syslog_aggregator.all %>
- <% if transport == "relp" %>
-*.* :omrelp:<%= address %>:<%= port %>
- <% elsif transport == "udp" %>
-*.* @<%= address %>:<%= port %>
- <% elsif transport == "tcp" %>
-*.* @@<%= address %>:<%= port %>
- <% else %>
-#only RELP, UDP, and TCP are supported
- <% end %>
-<% end %>
-
-<% end.else do %>
-# Prevent them from reaching anywhere else
-:programname, startswith, "vcap." ~
-<% end %>
=== added file 'hooks/cc-relation-changed'
--- hooks/cc-relation-changed 1970-01-01 00:00:00 +0000
+++ hooks/cc-relation-changed 2014-06-10 15:56:14 +0000
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+from charmhelpers.core import services
+import config
+manager = services.ServiceManager(config.SERVICES)
+manager.manage()
=== modified file 'hooks/charmhelpers/contrib/cloudfoundry/common.py'
--- hooks/charmhelpers/contrib/cloudfoundry/common.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/cloudfoundry/common.py 2014-06-10 15:56:14 +0000
@@ -1,11 +1,3 @@
-import sys
-import os
-import pwd
-import grp
-import subprocess
-
-from contextlib import contextmanager
-from charmhelpers.core.hookenv import log, ERROR, DEBUG
from charmhelpers.core import host
from charmhelpers.fetch import (
@@ -13,55 +5,6 @@
)
-def run(command, exit_on_error=True, quiet=False):
- '''Run a command and return the output.'''
- if not quiet:
- log("Running {!r}".format(command), DEBUG)
- p = subprocess.Popen(
- command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- shell=isinstance(command, basestring))
- p.stdin.close()
- lines = []
- for line in p.stdout:
- if line:
- if not quiet:
- print line
- lines.append(line)
- elif p.poll() is not None:
- break
-
- p.wait()
-
- if p.returncode == 0:
- return '\n'.join(lines)
-
- if p.returncode != 0 and exit_on_error:
- log("ERROR: {}".format(p.returncode), ERROR)
- sys.exit(p.returncode)
-
- raise subprocess.CalledProcessError(
- p.returncode, command, '\n'.join(lines))
-
-
-def chownr(path, owner, group):
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- for root, dirs, files in os.walk(path):
- for momo in dirs:
- os.chown(os.path.join(root, momo), uid, gid)
- for momo in files:
- os.chown(os.path.join(root, momo), uid, gid)
-
-
-@contextmanager
-def chdir(d):
- cur = os.getcwd()
- try:
- yield os.chdir(d)
- finally:
- os.chdir(cur)
-
-
def prepare_cloudfoundry_environment(config_data, packages):
add_source(config_data['source'], config_data.get('key'))
apt_update(fatal=True)
=== removed file 'hooks/charmhelpers/contrib/cloudfoundry/config_helper.py'
--- hooks/charmhelpers/contrib/cloudfoundry/config_helper.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/cloudfoundry/config_helper.py 1970-01-01 00:00:00 +0000
@@ -1,11 +0,0 @@
-import jinja2
-
-TEMPLATES_DIR = 'templates'
-
-def render_template(template_name, context, template_dir=TEMPLATES_DIR):
- templates = jinja2.Environment(
- loader=jinja2.FileSystemLoader(template_dir))
- template = templates.get_template(template_name)
- return template.render(context)
-
-
=== modified file 'hooks/charmhelpers/contrib/cloudfoundry/contexts.py'
--- hooks/charmhelpers/contrib/cloudfoundry/contexts.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/cloudfoundry/contexts.py 2014-06-10 15:56:14 +0000
@@ -1,70 +1,75 @@
import os
import yaml
-from charmhelpers.core import hookenv
-from charmhelpers.contrib.openstack.context import OSContextGenerator
-
-
-class RelationContext(OSContextGenerator):
- def __call__(self):
- if not hookenv.relation_ids(self.interface):
- return {}
-
- ctx = {}
- for rid in hookenv.relation_ids(self.interface):
- for unit in hookenv.related_units(rid):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- required = set(self.required_keys)
- if set(reldata.keys()).issuperset(required):
- ns = ctx.setdefault(self.interface, {})
- for k, v in reldata.items():
- ns[k] = v
- return ctx
-
- return {}
-
-
-class ConfigContext(OSContextGenerator):
- def __call__(self):
- return hookenv.config()
-
-
-# Stores `config_data` hash into yaml file with `file_name` as a name
-# if `file_name` already exists, then it loads data from `file_name`.
-class StoredContext(OSContextGenerator):
+from charmhelpers.core.services import RelationContext
+
+
+class StoredContext(dict):
+ """
+ A data context that always returns the data that it was first created with.
+ """
def __init__(self, file_name, config_data):
- self.data = config_data
+ """
+ If the file exists, populate `self` with the data from the file.
+ Otherwise, populate with the given data and persist it to the file.
+ """
if os.path.exists(file_name):
- with open(file_name, 'r') as file_stream:
- self.data = yaml.load(file_stream)
- if not self.data:
- raise OSError("%s is empty" % file_name)
+ self.update(self.read_context(file_name))
else:
- with open(file_name, 'w') as file_stream:
- yaml.dump(config_data, file_stream)
- self.data = config_data
-
- def __call__(self):
- return self.data
-
-
-class StaticContext(OSContextGenerator):
- def __init__(self, data):
- self.data = data
-
- def __call__(self):
- return self.data
-
-
-class NatsContext(RelationContext):
+ self.store_context(file_name, config_data)
+ self.update(config_data)
+
+ def store_context(self, file_name, config_data):
+ with open(file_name, 'w') as file_stream:
+ yaml.dump(config_data, file_stream)
+
+ def read_context(self, file_name):
+ with open(file_name, 'r') as file_stream:
+ data = yaml.load(file_stream)
+ if not data:
+ raise OSError("%s is empty" % file_name)
+ return data
+
+
+class NatsRelation(RelationContext):
interface = 'nats'
- required_keys = ['nats_port', 'nats_address', 'nats_user', 'nats_password']
-
-
-class RouterContext(RelationContext):
+ required_keys = ['address', 'port', 'user', 'password']
+
+
+class MysqlRelation(RelationContext):
+ interface = 'db'
+ required_keys = ['user', 'password', 'host', 'database']
+ dsn_template = "mysql2://{user}:{password}@{host}:{port}/{database}"
+
+ def get_data(self):
+ RelationContext.get_data(self)
+ if self.is_ready():
+ for unit in self['db']:
+ if 'port' not in unit:
+ unit['port'] = '3306'
+ unit['dsn'] = self.dsn_template.format(**unit)
+
+
+class RouterRelation(RelationContext):
interface = 'router'
required_keys = ['domain']
-class LogRouterContext(RelationContext):
+
+class LogRouterRelation(RelationContext):
interface = 'logrouter'
- required_keys = ['shared-secret', 'logrouter-address']
+ required_keys = ['shared_secret', 'address', 'incoming_port', 'outgoing_port']
+
+
+class LoggregatorRelation(RelationContext):
+ interface = 'loggregator'
+ required_keys = ['address', 'incoming_port', 'outgoing_port']
+
+
+class EtcdRelation(RelationContext):
+ interface = 'etcd'
+ required_keys = ['hostname', 'port']
+
+
+class CloudControllerRelation(RelationContext):
+ interface = 'cc'
+ required_keys = ['hostname', 'port', 'user', 'password']
=== removed file 'hooks/charmhelpers/contrib/cloudfoundry/install.py'
--- hooks/charmhelpers/contrib/cloudfoundry/install.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/cloudfoundry/install.py 1970-01-01 00:00:00 +0000
@@ -1,35 +0,0 @@
-import os
-import subprocess
-
-
-def install(src, dest, fileprops=None, sudo=False):
- """Install a file from src to dest. Dest can be a complete filename
- or a target directory. fileprops is a dict with 'owner' (username of owner)
- and mode (octal string) as keys, the defaults are 'ubuntu' and '400'
-
- When owner is passed or when access requires it sudo can be set to True and
- sudo will be used to install the file.
- """
- if not fileprops:
- fileprops = {}
- mode = fileprops.get('mode', '400')
- owner = fileprops.get('owner')
- cmd = ['install']
-
- if not os.path.exists(src):
- raise OSError(src)
-
- if not os.path.exists(dest) and not os.path.exists(os.path.dirname(dest)):
- # create all but the last component as path
- cmd.append('-D')
-
- if mode:
- cmd.extend(['-m', mode])
-
- if owner:
- cmd.extend(['-o', owner])
-
- if sudo:
- cmd.insert(0, 'sudo')
- cmd.extend([src, dest])
- subprocess.check_call(cmd)
=== removed file 'hooks/charmhelpers/contrib/cloudfoundry/services.py'
--- hooks/charmhelpers/contrib/cloudfoundry/services.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/cloudfoundry/services.py 1970-01-01 00:00:00 +0000
@@ -1,118 +0,0 @@
-import os
-import tempfile
-from charmhelpers.core import host
-
-from charmhelpers.contrib.cloudfoundry.install import install
-from charmhelpers.core.hookenv import log
-from jinja2 import Environment, FileSystemLoader
-
-SERVICE_CONFIG = []
-TEMPLATE_LOADER = None
-
-
-def render_template(template_name, context):
- """Render template to a tempfile returning the name"""
- _, fn = tempfile.mkstemp()
- template = load_template(template_name)
- output = template.render(context)
- with open(fn, "w") as fp:
- fp.write(output)
- return fn
-
-
-def collect_contexts(context_providers):
- ctx = {}
- for provider in context_providers:
- c = provider()
- if not c:
- return {}
- ctx.update(c)
- return ctx
-
-
-def load_template(name):
- return TEMPLATE_LOADER.get_template(name)
-
-
-def configure_templates(template_dir):
- global TEMPLATE_LOADER
- TEMPLATE_LOADER = Environment(loader=FileSystemLoader(template_dir))
-
-
-def register(service_configs, template_dir):
- """Register a list of service configs.
-
- Service Configs are dicts in the following formats:
-
- {
- "service": <service name>,
- "templates": [ {
- 'target': <render target of template>,
- 'source': <optional name of template in passed in template_dir>
- 'file_properties': <optional dict taking owner and octal mode>
- 'contexts': [ context generators, see contexts.py ]
- }
- ] }
-
- If 'source' is not provided for a template the template_dir will
- be consulted for ``basename(target).j2``.
- """
- global SERVICE_CONFIG
- if template_dir:
- configure_templates(template_dir)
- SERVICE_CONFIG.extend(service_configs)
-
-
-def reset():
- global SERVICE_CONFIG
- SERVICE_CONFIG = []
-
-
-# def service_context(name):
-# contexts = collect_contexts(template['contexts'])
-
-def reconfigure_service(service_name, restart=True):
- global SERVICE_CONFIG
- service = None
- for service in SERVICE_CONFIG:
- if service['service'] == service_name:
- break
- if not service or service['service'] != service_name:
- raise KeyError('Service not registered: %s' % service_name)
-
- templates = service['templates']
- for template in templates:
- contexts = collect_contexts(template['contexts'])
- if contexts:
- template_target = template['target']
- default_template = "%s.j2" % os.path.basename(template_target)
- template_name = template.get('source', default_template)
- output_file = render_template(template_name, contexts)
- file_properties = template.get('file_properties')
- install(output_file, template_target, file_properties)
- os.unlink(output_file)
- else:
- restart = False
-
- if restart:
- host.service_restart(service_name)
-
-
-def stop_services():
- global SERVICE_CONFIG
- for service in SERVICE_CONFIG:
- if host.service_running(service['service']):
- host.service_stop(service['service'])
-
-
-def get_service(service_name):
- global SERVICE_CONFIG
- for service in SERVICE_CONFIG:
- if service_name == service['service']:
- return service
- return None
-
-
-def reconfigure_services(restart=True):
- for service in SERVICE_CONFIG:
- reconfigure_service(service['service'], restart=restart)
=== removed file 'hooks/charmhelpers/contrib/cloudfoundry/upstart_helper.py'
--- hooks/charmhelpers/contrib/cloudfoundry/upstart_helper.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/cloudfoundry/upstart_helper.py 1970-01-01 00:00:00 +0000
@@ -1,14 +0,0 @@
-import os
-import glob
-from charmhelpers.core import hookenv
-from charmhelpers.core.hookenv import charm_dir
-from charmhelpers.contrib.cloudfoundry.install import install
-
-
-def install_upstart_scripts(dirname=os.path.join(hookenv.charm_dir(),
- 'files/upstart'),
- pattern='*.conf'):
- for script in glob.glob("%s/%s" % (dirname, pattern)):
- filename = os.path.join(dirname, script)
- hookenv.log('Installing upstart job:' + filename, hookenv.DEBUG)
- install(filename, '/etc/init')
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-06-10 15:56:14 +0000
@@ -570,7 +570,7 @@
if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt())
- elif self.plugin == 'nvp':
+ elif self.plugin in ['nvp', 'nsx']:
ctxt.update(self.nvp_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-06-10 15:56:14 +0000
@@ -114,14 +114,30 @@
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
+ },
+ 'nsx': {
+ 'config': '/etc/neutron/plugins/vmware/nsx.ini',
+ 'driver': 'vmware',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=NEUTRON_CONF_DIR)],
+ 'services': [],
+ 'packages': [],
+ 'server_packages': ['neutron-server',
+ 'neutron-plugin-vmware'],
+ 'server_services': ['neutron-server']
}
}
- # NOTE: patch in ml2 plugin for icehouse onwards
if release >= 'icehouse':
+ # NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
+ # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
+ plugins['nvp'] = plugins['nsx']
return plugins
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-06-10 15:56:14 +0000
@@ -131,6 +131,11 @@
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
apt.init()
+
+ # Tell apt to build an in-memory cache to prevent race conditions (if
+ # another process is already building the cache).
+ apt.config.set("Dir::Cache::pkgcache", "")
+
cache = apt.Cache()
try:
@@ -183,7 +188,7 @@
if cname == codename:
return version
#e = "Could not determine OpenStack version for package: %s" % pkg
- #error_out(e)
+ # error_out(e)
os_rel = None
=== modified file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-06-10 15:56:14 +0000
@@ -62,7 +62,7 @@
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
if l.strip().startswith('VG Name'):
- vg = ' '.join(l.split()).split(' ').pop()
+ vg = ' '.join(l.strip().split()[2:])
return vg
=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-06-10 15:56:14 +0000
@@ -1,4 +1,5 @@
-from os import stat
+import os
+import re
from stat import S_ISBLK
from subprocess import (
@@ -14,7 +15,9 @@
:returns: boolean: True if path is a block device, False if not.
'''
- return S_ISBLK(stat(path).st_mode)
+ if not os.path.exists(path):
+ return False
+ return S_ISBLK(os.stat(path).st_mode)
def zap_disk(block_device):
@@ -29,7 +32,18 @@
'--clear', block_device])
dev_end = check_output(['blockdev', '--getsz', block_device])
gpt_end = int(dev_end.split()[0]) - 100
- check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
'bs=1M', 'count=1'])
- check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
- 'bs=512', 'count=100', 'seek=%s'%(gpt_end)])
+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+ 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
+
+def is_device_mounted(device):
+ '''Given a device path, return True if that device is mounted, and False
+ if it isn't.
+
+ :param device: str: Full path of the device to check.
+ :returns: boolean: True if the path represents a mounted device, False if
+ it doesn't.
+ '''
+ out = check_output(['mount'])
+ return bool(re.search(device + r"[0-9]+\b", out))
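
A minimal sketch of how the new is_device_mounted helper behaves; the mount
output below is illustrative. Note that the regex matches numbered partitions
(e.g. /dev/vdb1), not a bare whole-disk mount:

    import re

    # Hypothetical `mount` output, for illustration only.
    mount_out = "/dev/vdb1 on /mnt type ext4 (rw)\n/dev/vda1 on / type ext4 (rw)"

    def is_device_mounted(device, out=mount_out):
        # True if a numbered partition of `device` appears in the output.
        return bool(re.search(device + r"[0-9]+\b", out))

    assert is_device_mounted('/dev/vdb')      # /dev/vdb1 is mounted
    assert not is_device_mounted('/dev/vdc')  # no /dev/vdcN present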
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-06-10 15:56:14 +0000
@@ -155,6 +155,100 @@
return os.path.basename(sys.argv[0])
+class Config(dict):
+ """A Juju charm config dictionary that can write itself to
+ disk (as json) and track which values have changed since
+ the previous hook invocation.
+
+ Do not instantiate this object directly - instead call
+ ``hookenv.config()``
+
+ Example usage::
+
+ >>> # inside a hook
+ >>> from charmhelpers.core import hookenv
+ >>> config = hookenv.config()
+ >>> config['foo']
+ 'bar'
+ >>> config['mykey'] = 'myval'
+ >>> config.save()
+
+
+ >>> # user runs `juju set mycharm foo=baz`
+ >>> # now we're inside subsequent config-changed hook
+ >>> config = hookenv.config()
+ >>> config['foo']
+ 'baz'
+ >>> # test to see if this val has changed since last hook
+ >>> config.changed('foo')
+ True
+ >>> # what was the previous value?
+ >>> config.previous('foo')
+ 'bar'
+ >>> # keys/values that we add are preserved across hooks
+ >>> config['mykey']
+ 'myval'
+ >>> # don't forget to save at the end of hook!
+ >>> config.save()
+
+ """
+ CONFIG_FILE_NAME = '.juju-persistent-config'
+
+ def __init__(self, *args, **kw):
+ super(Config, self).__init__(*args, **kw)
+ self._prev_dict = None
+ self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
+ if os.path.exists(self.path):
+ self.load_previous()
+
+ def load_previous(self, path=None):
+ """Load previous copy of config from disk so that current values
+ can be compared to previous values.
+
+ :param path:
+
+ File path from which to load the previous config. If `None`,
+ config is loaded from the default location. If `path` is
+ specified, subsequent `save()` calls will write to the same
+ path.
+
+ """
+ self.path = path or self.path
+ with open(self.path) as f:
+ self._prev_dict = json.load(f)
+
+ def changed(self, key):
+ """Return true if the value for this key has changed since
+ the last save.
+
+ """
+ if self._prev_dict is None:
+ return True
+ return self.previous(key) != self.get(key)
+
+ def previous(self, key):
+ """Return previous value for this key, or None if there
+ is no "previous" value.
+
+ """
+ if self._prev_dict:
+ return self._prev_dict.get(key)
+ return None
+
+ def save(self):
+ """Save this config to disk.
+
+ Preserves items in _prev_dict that do not exist in self.
+
+ """
+ if self._prev_dict:
+ for k, v in self._prev_dict.iteritems():
+ if k not in self:
+ self[k] = v
+ with open(self.path, 'w') as f:
+ json.dump(self, f)
+
+
@cached
def config(scope=None):
"""Juju charm configuration"""
@@ -163,7 +257,10 @@
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
- return json.loads(subprocess.check_output(config_cmd_line))
+ config_data = json.loads(subprocess.check_output(config_cmd_line))
+ if scope is not None:
+ return config_data
+ return Config(config_data)
except ValueError:
return None
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/core/host.py 2014-06-10 15:56:14 +0000
@@ -12,6 +12,9 @@
import string
import subprocess
import hashlib
+import shutil
+import apt_pkg
+from contextlib import contextmanager
from collections import OrderedDict
@@ -60,6 +63,11 @@
return False
+def service_available(service_name):
+ """Determine whether a system service is available"""
+ return service('status', service_name)
+
+
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user to the system"""
try:
@@ -143,6 +151,16 @@
target.write(content)
+def copy_file(src, dst, owner='root', group='root', perms=0444):
+ """Create or overwrite a file with the contents of another file"""
+ log("Writing file {} {}:{} {:o} from {}".format(dst, owner, group, perms, src))
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+ shutil.copyfile(src, dst)
+ os.chown(dst, uid, gid)
+ os.chmod(dst, perms)
+
+
def mount(device, mountpoint, options=None, persist=False):
"""Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount']
@@ -295,3 +313,37 @@
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+ '''Compare supplied revno with the revno of the installed package
+ 1 => Installed revno is greater than supplied arg
+ 0 => Installed revno is the same as supplied arg
+ -1 => Installed revno is less than supplied arg
+ '''
+ if not pkgcache:
+ apt_pkg.init()
+ pkgcache = apt_pkg.Cache()
+ pkg = pkgcache[package]
+ return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@contextmanager
+def chdir(d):
+ cur = os.getcwd()
+ try:
+ yield os.chdir(d)
+ finally:
+ os.chdir(cur)
+
+
+def chownr(path, owner, group):
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+
+ for root, dirs, files in os.walk(path):
+ for name in dirs + files:
+ full = os.path.join(root, name)
+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+ if not broken_symlink:
+ os.chown(full, uid, gid)
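
For reference, a minimal usage sketch of the new cmp_pkgrevno helper; the
package name and version are illustrative:

    from charmhelpers.core import host

    # Returns 1, 0, or -1 comparing the installed revno to the supplied one.
    if host.cmp_pkgrevno('ceph', '0.61') >= 0:
        print 'installed ceph is 0.61 or newer'
    else:
        print 'installed ceph is older than 0.61'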
=== added file 'hooks/charmhelpers/core/services.py'
--- hooks/charmhelpers/core/services.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services.py 2014-06-10 15:56:14 +0000
@@ -0,0 +1,347 @@
+import os
+import sys
+from collections import Iterable
+from charmhelpers.core import templating
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+class ServiceManager(object):
+ def __init__(self, services=None):
+ """
+ Register a list of services, given their definitions.
+
+ Traditional charm authoring is focused on implementing hooks. That is,
+ the charm author is thinking in terms of "What hook am I handling; what
+ does this hook need to do?" However, in most cases, the real question
+ should be "Do I have the information I need to configure and start this
+ piece of software and, if so, what are the steps for doing so." The
+ ServiceManager framework tries to bring the focus to the data and the
+ setup tasks, in the most declarative way possible.
+
+ Service definitions are dicts in the following formats (all keys except
+ 'service' are optional):
+
+ {
+ "service": <service name>,
+ "required_data": <list of required data contexts>,
+ "data_ready": <one or more callbacks>,
+ "data_lost": <one or more callbacks>,
+ "start": <one or more callbacks>,
+ "stop": <one or more callbacks>,
+ "ports": <list of ports to manage>,
+ }
+
+ The 'required_data' list should contain dicts of required data (or
+ dependency managers that act like dicts and know how to collect the data).
+ Only when all items in the 'required_data' list are populated are the
+ 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+ information.
+
+ The 'data_ready' value should be either a single callback, or a list of
+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+ Each callback will be called with the service name as the only parameter.
+ After all of the 'data_ready' callbacks have been called, the 'start'
+ callbacks are fired.
+
+ The 'data_lost' value should be either a single callback, or a list of
+ callbacks, to be called when a 'required_data' item no longer passes
+ `is_ready()`. Each callback will be called with the service name as the
+ only parameter. After all of the 'data_lost' callbacks have been called,
+ the 'stop' callbacks are fired.
+
+ The 'start' value should be either a single callback, or a list of
+ callbacks, to be called when starting the service, after the 'data_ready'
+ callbacks are complete. Each callback will be called with the service
+ name as the only parameter. This defaults to
+ `[host.service_restart, services.open_ports]`.
+
+ The 'stop' value should be either a single callback, or a list of
+ callbacks, to be called when stopping the service. If the service is
+ being stopped because it no longer has all of its 'required_data', this
+ will be called after all of the 'data_lost' callbacks are complete.
+ Each callback will be called with the service name as the only parameter.
+ This defaults to `[services.close_ports, host.service_stop]`.
+
+ The 'ports' value should be a list of ports to manage. The default
+ 'start' handler will open the ports after the service is started,
+ and the default 'stop' handler will close the ports prior to stopping
+ the service.
+
+
+ Examples:
+
+ The following registers an Upstart service called bingod that depends on
+ a mongodb relation and renders two templates when its data is ready,
+ and a Runit service called spadesd.
+
+ manager = services.ServiceManager([
+ {
+ 'service': 'bingod',
+ 'ports': [80, 443],
+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
+ 'data_ready': [
+ services.template(source='bingod.conf'),
+ services.template(source='bingod.ini',
+ target='/etc/bingod.ini',
+ owner='bingo', perms=0400),
+ ],
+ },
+ {
+ 'service': 'spadesd',
+ 'data_ready': services.template(source='spadesd_run.j2',
+ target='/etc/sv/spadesd/run',
+ perms=0555),
+ 'start': runit_start,
+ 'stop': runit_stop,
+ },
+ ])
+ manager.manage()
+ """
+ self.services = {}
+ for service in services or []:
+ service_name = service['service']
+ self.services[service_name] = service
+
+ def manage(self):
+ """
+ Handle the current hook by doing The Right Thing with the registered services.
+ """
+ hook_name = os.path.basename(sys.argv[0])
+ if hook_name == 'stop':
+ self.stop_services()
+ else:
+ self.reconfigure_services()
+
+ def reconfigure_services(self, *service_names):
+ """
+ Update all files for one or more registered services, and,
+ if ready, optionally restart them.
+
+ If no service names are given, reconfigures all registered services.
+ """
+ for service_name in service_names or self.services.keys():
+ if self.is_ready(service_name):
+ self.fire_event('data_ready', service_name)
+ self.fire_event('start', service_name, default=[
+ host.service_restart,
+ open_ports])
+ self.save_ready(service_name)
+ else:
+ if self.was_ready(service_name):
+ self.fire_event('data_lost', service_name)
+ self.fire_event('stop', service_name, default=[
+ close_ports,
+ host.service_stop])
+ self.save_lost(service_name)
+
+ def stop_services(self, *service_names):
+ """
+ Stop one or more registered services, by name.
+
+ If no service names are given, stops all registered services.
+ """
+ for service_name in service_names or self.services.keys():
+ self.fire_event('stop', service_name, default=[
+ close_ports,
+ host.service_stop])
+
+ def get_service(self, service_name):
+ """
+ Given the name of a registered service, return its service definition.
+ """
+ service = self.services.get(service_name)
+ if not service:
+ raise KeyError('Service not registered: %s' % service_name)
+ return service
+
+ def fire_event(self, event_name, service_name, default=None):
+ """
+ Fire a data_ready, data_lost, start, or stop event on a given service.
+ """
+ service = self.get_service(service_name)
+ callbacks = service.get(event_name, default)
+ if not callbacks:
+ return
+ if not isinstance(callbacks, Iterable):
+ callbacks = [callbacks]
+ for callback in callbacks:
+ if isinstance(callback, ManagerCallback):
+ callback(self, service_name, event_name)
+ else:
+ callback(service_name)
+
+ def is_ready(self, service_name):
+ """
+ Determine if a registered service is ready, by checking its 'required_data'.
+
+ A 'required_data' item can be any mapping type, and is considered ready
+ if `bool(item)` evaluates as True.
+ """
+ service = self.get_service(service_name)
+ reqs = service.get('required_data', [])
+ return all(bool(req) for req in reqs)
+
+ def save_ready(self, service_name):
+ """
+ Save an indicator that the given service is now data_ready.
+ """
+ ready_file = '{}/.ready.{}'.format(hookenv.charm_dir(), service_name)
+ with open(ready_file, 'a'):
+ pass
+
+ def save_lost(self, service_name):
+ """
+ Save an indicator that the given service is no longer data_ready.
+ """
+ ready_file = '{}/.ready.{}'.format(hookenv.charm_dir(), service_name)
+ if os.path.exists(ready_file):
+ os.remove(ready_file)
+
+ def was_ready(self, service_name):
+ """
+ Determine if the given service was previously data_ready.
+ """
+ ready_file = '{}/.ready.{}'.format(hookenv.charm_dir(), service_name)
+ return os.path.exists(ready_file)
+
+
+class RelationContext(dict):
+ """
+ Base class for a context generator that gets relation data from juju.
+
+ Subclasses must provide `interface`, which is the interface type of interest,
+ and `required_keys`, which is the set of keys required for the relation to
+ be considered complete. The first relation for the interface that is complete
+ will be used to populate the data for template.
+
+ The generated context will be namespaced under the interface type, to prevent
+ potential naming conflicts.
+ """
+ interface = None
+ required_keys = []
+
+ def __init__(self, *args, **kwargs):
+ super(RelationContext, self).__init__(*args, **kwargs)
+ self.get_data()
+
+ def __bool__(self):
+ """
+ Returns True if all of the required_keys are available.
+ """
+ return self.is_ready()
+
+ __nonzero__ = __bool__
+
+ def __repr__(self):
+ return super(RelationContext, self).__repr__()
+
+ def is_ready(self):
+ """
+ Returns True if all of the `required_keys` are available from any units.
+ """
+ ready = len(self.get(self.interface, [])) > 0
+ if not ready:
+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+ return ready
+
+ def _is_ready(self, unit_data):
+ """
+ Helper method that tests a set of relation data and returns True if
+ all of the `required_keys` are present.
+ """
+ return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+ def get_data(self):
+ """
+ Retrieve the relation data for each unit involved in a relation and,
+ if complete, store it in a list under `self[self.interface]`. This
+ is automatically called when the RelationContext is instantiated.
+
+ The units are sorted lexicographically first by the service ID, then by
+ the unit ID. Thus, if an interface has two other services, 'db:1'
+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+ set of data, the relation data for the units will be stored in the
+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+ If you only care about a single unit on the relation, you can just
+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
+ support multiple units on a relation, you should iterate over the list,
+ like:
+
+ {% for unit in interface -%}
+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
+ {%- endfor %}
+
+ Note that since all sets of relation data from all related services and
+ units are in a single list, if you need to know which service or unit a
+ set of data came from, you'll need to extend this class to preserve
+ that information.
+ """
+ if not hookenv.relation_ids(self.interface):
+ return
+
+ ns = self.setdefault(self.interface, [])
+ for rid in sorted(hookenv.relation_ids(self.interface)):
+ for unit in sorted(hookenv.related_units(rid)):
+ reldata = hookenv.relation_get(rid=rid, unit=unit)
+ if self._is_ready(reldata):
+ ns.append(reldata)
+
+
+class ManagerCallback(object):
+ """
+ Special case of a callback that takes the `ServiceManager` instance
+ in addition to the service name.
+
+ Subclasses should implement `__call__` which should accept three parameters:
+
+ * `manager` The `ServiceManager` instance
+ * `service_name` The name of the service it's being triggered for
+ * `event_name` The name of the event that this callback is handling
+ """
+ def __call__(self, manager, service_name, event_name):
+ raise NotImplementedError()
+
+
+class TemplateCallback(ManagerCallback):
+ """
+ Callback class that will render a template, for use as a ready action.
+
+ The `source` and `target` paths are passed through to `templating.render`.
+ """
+ def __init__(self, source, target, owner='root', group='root', perms=0444):
+ self.source = source
+ self.target = target
+ self.owner = owner
+ self.group = group
+ self.perms = perms
+
+ def __call__(self, manager, service_name, event_name):
+ service = manager.get_service(service_name)
+ context = {}
+ for ctx in service.get('required_data', []):
+ context.update(ctx)
+ templating.render(self.source, self.target, context,
+ self.owner, self.group, self.perms)
+
+
+class PortManagerCallback(ManagerCallback):
+ """
+ Callback class that will open or close ports, for use as either
+ a start or stop action.
+ """
+ def __call__(self, manager, service_name, event_name):
+ service = manager.get_service(service_name)
+ for port in service.get('ports', []):
+ if event_name == 'start':
+ hookenv.open_port(port)
+ elif event_name == 'stop':
+ hookenv.close_port(port)
+
+
+# Convenience aliases
+render_template = template = TemplateCallback
+open_ports = PortManagerCallback()
+close_ports = PortManagerCallback()
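
The subclass contract for RelationContext is the one exercised by the
contexts.py changes earlier in this diff, e.g.:

    from charmhelpers.core.services import RelationContext

    class NatsRelation(RelationContext):
        # Ready once any unit on the 'nats' interface provides all four keys.
        interface = 'nats'
        required_keys = ['address', 'port', 'user', 'password']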
=== added file 'hooks/charmhelpers/core/templating.py'
--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/templating.py 2014-06-10 15:56:14 +0000
@@ -0,0 +1,51 @@
+import os
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
+ """
+ Render a template.
+
+ The `source` path, if not absolute, is relative to the `templates_dir`.
+
+ The `target` path should be absolute.
+
+ The context should be a dict containing the values to be replaced in the
+ template.
+
+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
+
+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
+
+ Note: Using this requires python-jinja2; if it is not installed, calling
+ this will attempt to use charmhelpers.fetch.apt_install to install it.
+ """
+ try:
+ from jinja2 import FileSystemLoader, Environment, exceptions
+ except ImportError:
+ try:
+ from charmhelpers.fetch import apt_install
+ except ImportError:
+ hookenv.log('Could not import jinja2, and could not import '
+ 'charmhelpers.fetch to install it',
+ level=hookenv.ERROR)
+ raise
+ apt_install('python-jinja2', fatal=True)
+ from jinja2 import FileSystemLoader, Environment, exceptions
+
+ if templates_dir is None:
+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+ loader = Environment(loader=FileSystemLoader(templates_dir))
+ try:
+ template = loader.get_template(source)
+ except exceptions.TemplateNotFound as e:
+ hookenv.log('Could not load template %s from %s.' %
+ (source, templates_dir),
+ level=hookenv.ERROR)
+ raise e
+ content = template.render(context)
+ host.mkdir(os.path.dirname(target))
+ host.write_file(target, content, owner, group, perms)
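
A minimal usage sketch of the render helper; the template name, target path,
and context keys are illustrative:

    from charmhelpers.core import templating

    # Renders templates/myservice.conf (a Jinja2 template in the charm's
    # templates dir) to /etc/init/myservice.conf with the given context.
    templating.render(
        source='myservice.conf',
        target='/etc/init/myservice.conf',
        context={'service_name': 'myservice', 'port': 8080},
        owner='root', group='root', perms=0444)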
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-05-14 16:40:09 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-06-10 15:56:14 +0000
@@ -1,4 +1,5 @@
import importlib
+import time
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
@@ -15,6 +16,7 @@
import apt_pkg
import os
+
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
@@ -56,10 +58,62 @@
'precise-proposed/icehouse': 'precise-proposed/icehouse',
}
+# The order of this list is very important. Handlers should be listed in from
+# least- to most-specific URL matching.
+FETCH_HANDLERS = (
+ 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
+ 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+)
+
+APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
+APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
+APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
+
+
+class SourceConfigError(Exception):
+ pass
+
+
+class UnhandledSource(Exception):
+ pass
+
+
+class AptLockError(Exception):
+ pass
+
+
+class BaseFetchHandler(object):
+
+ """Base class for FetchHandler implementations in fetch plugins"""
+
+ def can_handle(self, source):
+ """Returns True if the source can be handled. Otherwise returns
+ a string explaining why it cannot"""
+ return "Wrong source type"
+
+ def install(self, source):
+ """Try to download and unpack the source. Return the path to the
+ unpacked files or raise UnhandledSource."""
+ raise UnhandledSource("Wrong source type {}".format(source))
+
+ def parse_url(self, url):
+ return urlparse(url)
+
+ def base_url(self, url):
+ """Return url without querystring or fragment"""
+ parts = list(self.parse_url(url))
+ parts[4:] = ['' for i in parts[4:]]
+ return urlunparse(parts)
+
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
apt_pkg.init()
+
+ # Tell apt to build an in-memory cache to prevent race conditions (if
+ # another process is already building the cache).
+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
+
cache = apt_pkg.Cache()
_pkgs = []
for package in packages:
@@ -87,14 +141,7 @@
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
- env = os.environ.copy()
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- subprocess.check_call(cmd, env=env)
- else:
- subprocess.call(cmd, env=env)
+ _run_apt_command(cmd, fatal)
def apt_upgrade(options=None, fatal=False, dist=False):
@@ -109,24 +156,13 @@
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
-
- env = os.environ.copy()
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- subprocess.check_call(cmd, env=env)
- else:
- subprocess.call(cmd, env=env)
+ _run_apt_command(cmd, fatal)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
- if fatal:
- subprocess.check_call(cmd)
- else:
- subprocess.call(cmd)
+ _run_apt_command(cmd, fatal)
def apt_purge(packages, fatal=False):
@@ -137,10 +173,7 @@
else:
cmd.extend(packages)
log("Purging {}".format(packages))
- if fatal:
- subprocess.check_call(cmd)
- else:
- subprocess.call(cmd)
+ _run_apt_command(cmd, fatal)
def apt_hold(packages, fatal=False):
@@ -151,6 +184,7 @@
else:
cmd.extend(packages)
log("Holding {}".format(packages))
+
if fatal:
subprocess.check_call(cmd)
else:
@@ -188,10 +222,6 @@
key])
-class SourceConfigError(Exception):
- pass
-
-
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
@@ -224,17 +254,6 @@
if update:
apt_update(fatal=True)
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
-)
-
-
-class UnhandledSource(Exception):
- pass
-
def install_remote(source):
"""
@@ -265,30 +284,6 @@
return install_remote(source)
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
@@ -306,3 +301,40 @@
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list
+
+
+def _run_apt_command(cmd, fatal=False):
+ """
+ Run an APT command, checking output and retrying if the fatal flag is set
+ to True.
+
+ :param cmd: str: The apt command to run.
+ :param fatal: bool: Whether the command's output should be checked and
+ retried.
+ """
+ env = os.environ.copy()
+
+ if 'DEBIAN_FRONTEND' not in env:
+ env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+ if fatal:
+ retry_count = 0
+ result = None
+
+ # If the command is considered "fatal", we need to retry if the apt
+ # lock was not acquired.
+
+ while result is None or result == APT_NO_LOCK:
+ try:
+ result = subprocess.check_call(cmd, env=env)
+ except subprocess.CalledProcessError, e:
+ retry_count = retry_count + 1
+ if retry_count > APT_NO_LOCK_RETRY_COUNT:
+ raise
+ result = e.returncode
+ log("Couldn't acquire DPKG lock. Will retry in {} seconds."
+ "".format(APT_NO_LOCK_RETRY_DELAY))
+ time.sleep(APT_NO_LOCK_RETRY_DELAY)
+
+ else:
+ subprocess.call(cmd, env=env)
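
With this refactoring, all fatal apt operations gain the lock-retry
behaviour; a usage sketch (package name illustrative):

    from charmhelpers import fetch

    # fatal=True now retries up to APT_NO_LOCK_RETRY_COUNT times if the
    # dpkg lock is held by another process.
    fetch.apt_update(fatal=True)
    fetch.apt_install(['python-jinja2'], fatal=True)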
=== modified file 'hooks/config-changed'
--- hooks/config-changed 2014-05-14 16:40:09 +0000
+++ hooks/config-changed 2014-06-10 15:56:14 +0000
@@ -1,2 +1,5 @@
-#!/bin/bash
-# config-changed occurs everytime a new configuration value is updated (juju set)
+#!/usr/bin/env python
+from charmhelpers.core import services
+import config
+manager = services.ServiceManager(config.SERVICES)
+manager.manage()
=== added file 'hooks/config.py'
--- hooks/config.py 1970-01-01 00:00:00 +0000
+++ hooks/config.py 2014-06-10 15:56:14 +0000
@@ -0,0 +1,80 @@
+from charmhelpers.core import services
+from charmhelpers.contrib.cloudfoundry import contexts
+
+HM9K_PACKAGES = ['python-jinja2', 'cfhm9000']
+
+HM_DIR = '/var/lib/cloudfoundry/cfhm9000'
+WORKSPACE_DIR = '/var/lib/cloudfoundry/hm-workspace'
+
+hm_relations = [contexts.NatsRelation(),
+ contexts.EtcdRelation(),
+ contexts.CloudControllerRelation()]
+
+SERVICES = [
+ {
+ 'service': 'cf-hm9k-fetcher',
+ 'required_data': hm_relations,
+ 'data_ready': [
+ services.template(source='hm9000.json',
+ target=HM_DIR + '/config/hm9000.json'),
+ services.template(source='cf-hm9k-fetcher.conf',
+ target='/etc/init/cf-hm9k-fetcher.conf'),
+ ],
+ },
+ {
+ 'service': 'cf-hm9k-listener',
+ 'required_data': hm_relations,
+ 'data_ready': [
+ services.template(source='cf-hm9k-listener.conf',
+ target='/etc/init/cf-hm9k-listener.conf'),
+ ],
+ },
+ {
+ 'service': 'cf-hm9k-analyzer',
+ 'required_data': hm_relations,
+ 'data_ready': [
+ services.template(source='cf-hm9k-analyzer.conf',
+ target='/etc/init/cf-hm9k-analyzer.conf'),
+ ],
+ },
+ {
+ 'service': 'cf-hm9k-sender',
+ 'required_data': hm_relations,
+ 'data_ready': [
+ services.template(source='cf-hm9k-sender.conf',
+ target='/etc/init/cf-hm9k-sender.conf'),
+ ],
+ },
+ {
+ 'service': 'cf-hm9k-metrics-server',
+ 'required_data': hm_relations,
+ 'data_ready': [
+ services.template(source='cf-hm9k-metrics-server.conf',
+ target='/etc/init/cf-hm9k-metrics-server.conf'),
+ ],
+ },
+ {
+ 'service': 'cf-hm9k-api-server',
+ 'required_data': hm_relations,
+ 'data_ready': [
+ services.template(source='cf-hm9k-api-server.conf',
+ target='/etc/init/cf-hm9k-api-server.conf'),
+ ],
+ },
+ {
+ 'service': 'cf-hm9k-evacuator',
+ 'required_data': hm_relations,
+ 'data_ready': [
+ services.template(source='cf-hm9k-evacuator.conf',
+ target='/etc/init/cf-hm9k-evacuator.conf'),
+ ],
+ },
+ {
+ 'service': 'cf-hm9k-shredder',
+ 'required_data': hm_relations,
+ 'data_ready': [
+ services.template(source='cf-hm9k-shredder.conf',
+ target='/etc/init/cf-hm9k-shredder.conf'),
+ ],
+ },
+]
=== added file 'hooks/etcd-relation-changed'
--- hooks/etcd-relation-changed 1970-01-01 00:00:00 +0000
+++ hooks/etcd-relation-changed 2014-06-10 15:56:14 +0000
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+from charmhelpers.core import services
+import config
+manager = services.ServiceManager(config.SERVICES)
+manager.manage()
=== modified file 'hooks/install'
--- hooks/install 2014-05-14 16:40:09 +0000
+++ hooks/install 2014-06-10 15:56:14 +0000
@@ -1,8 +1,43 @@
-#!/bin/bash
-# Here do anything needed to install the service
-# i.e. apt-get install -y foo or bzr branch http://myserver/mycode /srv/webroot
-# Make sure this hook exits cleanly and is idempotent, common problems here are
-# failing to account for a debconf question on a dependency, or trying to pull
-# from github without installing git first.
-
-apt-get install -y cf-hm9000
+#!/usr/bin/env python
+# vim: et ai ts=4 sw=4:
+
+import os
+import subprocess
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+from charmhelpers.contrib.cloudfoundry.common import (
+    prepare_cloudfoundry_environment
+)
+
+import config
+
+CHARM_DIR = hookenv.charm_dir()
+
+
+def install():
+    prepare_cloudfoundry_environment(hookenv.config(), config.HM9K_PACKAGES)
+    install_from_source()
+
+
+def install_from_source():
+    subprocess.check_call([
+        'git', 'clone',
+        'https://github.com/cloudfoundry/hm-workspace', config.WORKSPACE_DIR])
+    host.mkdir(config.WORKSPACE_DIR + '/bin')
+    with host.chdir(config.WORKSPACE_DIR):
+        subprocess.check_call(['git', 'submodule', 'update', '--init'])
+    with host.chdir(config.WORKSPACE_DIR + '/src/github.com/cloudfoundry/hm9000'):
+        subprocess.check_call(['go', 'install', '.'],
+                              env={'GOPATH': config.WORKSPACE_DIR})
+
+
+def install_from_charm():
+    host.copy_file(
+        os.path.join(hookenv.charm_dir(), 'files/hm9000'),
+        config.WORKSPACE_DIR + '/bin/hm9000',
+        owner='vcap', group='vcap', perms=0555)
+
+
+if __name__ == '__main__':
+    install()
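install_from_source needs to run the submodule update and the Go build from inside the workspace, hence the chdir context manager. If the deployed charm-helpers revision does not ship host.chdir, a minimal local equivalent would be (an assumption, not a documented charm-helpers API):

    import os
    from contextlib import contextmanager

    @contextmanager
    def chdir(path):
        """Temporarily switch the working directory, restoring it on exit."""
        prev = os.getcwd()
        os.chdir(path)
        try:
            yield path
        finally:
            os.chdir(prev)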
=== added file 'hooks/metrics-relation-changed'
--- hooks/metrics-relation-changed 1970-01-01 00:00:00 +0000
+++ hooks/metrics-relation-changed 2014-06-10 15:56:14 +0000
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+from charmhelpers.core import services
+import config
+manager = services.ServiceManager(config.SERVICES)
+manager.manage()
=== added file 'hooks/nats-relation-changed'
--- hooks/nats-relation-changed 1970-01-01 00:00:00 +0000
+++ hooks/nats-relation-changed 2014-06-10 15:56:14 +0000
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+from charmhelpers.core import services
+import config
+manager = services.ServiceManager(config.SERVICES)
+manager.manage()
=== removed file 'hooks/relation-name-relation-broken'
--- hooks/relation-name-relation-broken 2014-05-14 16:40:09 +0000
+++ hooks/relation-name-relation-broken 1970-01-01 00:00:00 +0000
@@ -1,2 +0,0 @@
-#!/bin/sh
-# This hook runs when the full relation is removed (not just a single member)
=== removed file 'hooks/relation-name-relation-changed'
--- hooks/relation-name-relation-changed 2014-05-14 16:40:09 +0000
+++ hooks/relation-name-relation-changed 1970-01-01 00:00:00 +0000
@@ -1,9 +0,0 @@
-#!/bin/bash
-# This must be renamed to the name of the relation. The goal here is to
-# affect any change needed by relationships being formed, modified, or broken
-# This script should be idempotent.
-juju-log $JUJU_REMOTE_UNIT modified its settings
-juju-log Relation settings:
-relation-get
-juju-log Relation members:
-relation-list
=== removed file 'hooks/relation-name-relation-departed'
--- hooks/relation-name-relation-departed 2014-05-14 16:40:09 +0000
+++ hooks/relation-name-relation-departed 1970-01-01 00:00:00 +0000
@@ -1,5 +0,0 @@
-#!/bin/sh
-# This must be renamed to the name of the relation. The goal here is to
-# affect any change needed by the remote unit leaving the relationship.
-# This script should be idempotent.
-juju-log $JUJU_REMOTE_UNIT departed
=== removed file 'hooks/relation-name-relation-joined'
--- hooks/relation-name-relation-joined 2014-05-14 16:40:09 +0000
+++ hooks/relation-name-relation-joined 1970-01-01 00:00:00 +0000
@@ -1,5 +0,0 @@
-#!/bin/sh
-# This must be renamed to the name of the relation. The goal here is to
-# affect any change needed by relationships being formed
-# This script should be idempotent.
-juju-log $JUJU_REMOTE_UNIT joined
=== modified file 'hooks/start'
--- hooks/start 2014-05-14 16:40:09 +0000
+++ hooks/start 2014-06-10 15:56:14 +0000
@@ -1,4 +1,5 @@
-#!/bin/bash
-# Here put anything that is needed to start the service.
-# Note that currently this is run directly after install
-# i.e. 'service apache2 start'
+#!/usr/bin/env python
+from charmhelpers.core import services
+import config
+manager = services.ServiceManager(config.SERVICES)
+manager.manage()
=== modified file 'hooks/stop'
--- hooks/stop 2014-05-14 16:40:09 +0000
+++ hooks/stop 2014-06-10 15:56:14 +0000
@@ -1,7 +1,5 @@
-#!/bin/bash
-# This will be run when the service is being torn down, allowing you to disable
-# it in various ways..
-# For example, if your web app uses a text file to signal to the load balancer
-# that it is live... you could remove it and sleep for a bit to allow the load
-# balancer to stop sending traffic.
-# rm /srv/webroot/server-live.txt && sleep 30
+#!/usr/bin/env python
+from charmhelpers.core import services
+import config
+manager = services.ServiceManager(config.SERVICES)
+manager.manage()
=== modified file 'hooks/upgrade-charm'
--- hooks/upgrade-charm 2014-05-14 16:40:09 +0000
+++ hooks/upgrade-charm 2014-06-10 15:56:14 +0000
@@ -1,6 +1,5 @@
-#!/bin/bash
-# This hook is executed each time a charm is upgraded after the new charm
-# contents have been unpacked
-# Best practice suggests you execute the hooks/install and
-# hooks/config-changed to ensure all updates are processed
-
+#!/usr/bin/env python
+from charmhelpers.core import services
+import config
+manager = services.ServiceManager(config.SERVICES)
+manager.manage()
=== modified file 'metadata.yaml'
--- metadata.yaml 2014-05-14 16:40:09 +0000
+++ metadata.yaml 2014-06-10 15:56:14 +0000
@@ -1,6 +1,6 @@
 name: cf-hm9000
-summary: Whit Morriss <whit.morriss@xxxxxxxxxxxxx>
-maintainer: cloudfoundry-charmers
+summary: Health Monitor for Cloud Foundry
+maintainer: cf-charmers
 description: |
   Deploys the hm9000 health monitoring system for cloud foundry
 categories:
@@ -9,13 +9,10 @@
 provides:
   metrics:
     interface: http
-  api:
-    interface: nats
 requires:
   nats:
     interface: nats
-  storage:
-    interface: etcd
-# peers:
-#   peer-relation:
-#     interface: interface-name
+  etcd:
+    interface: http
+  cc:
+    interface: controller
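With this metadata change the charm drops the unused api and storage relations and instead requires explicit nats, etcd, and cc relations, alongside the metrics endpoint it provides over http; until all three required relations are joined, the services framework above keeps the hm9000 jobs stopped. Assuming the related services are deployed under these names (the cloud controller name here is a guess), wiring it up would look like:

    juju add-relation cf-hm9000 nats
    juju add-relation cf-hm9000 etcd
    juju add-relation cf-hm9000 cf-cloud-controller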
=== removed file 'notes.md'
--- notes.md 2014-05-14 16:40:09 +0000
+++ notes.md 1970-01-01 00:00:00 +0000
@@ -1,8 +0,0 @@
-# Notes
-
-## Relations
-
- - etcd
- - nats
-
-
=== added directory 'templates'
=== added file 'templates/cf-hm9k-analyzer.conf'
--- templates/cf-hm9k-analyzer.conf 1970-01-01 00:00:00 +0000
+++ templates/cf-hm9k-analyzer.conf 2014-06-10 15:56:14 +0000
@@ -0,0 +1,17 @@
+description "Cloud Foundry HM9000"
+author "cf-charmers <cf-charmers@xxxxxxxxxxxxxxxxxxx>"
+start on runlevel [2345]
+stop on runlevel [!2345]
+#expect daemon
+#apparmor load <profile-path>
+setuid vcap
+setgid vcap
+respawn
+respawn limit 10 5
+normal exit 0
+
+env GOPATH=/var/lib/cloudfoundry/hm-workspace/
+export GOPATH
+
+chdir /var/lib/cloudfoundry/hm-workspace
+exec ./bin/hm9000 analyze --poll --config=/var/lib/cloudfoundry/cfhm9000/config/hm9000.json
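The seven job definitions that follow are identical to this one apart from the hm9000 subcommand on the exec line. Collected in one place for reference (taken straight from the exec lines in these templates):

    # upstart job             -> hm9000 subcommand and flags
    HM9K_COMMANDS = {
        'cf-hm9k-analyzer':       'analyze --poll',
        'cf-hm9k-api-server':     'serve_api',
        'cf-hm9k-evacuator':      'evacuator',
        'cf-hm9k-fetcher':        'fetch_desired --poll',
        'cf-hm9k-listener':       'listen',
        'cf-hm9k-metrics-server': 'serve_metrics',
        'cf-hm9k-sender':         'send --poll',
        'cf-hm9k-shredder':       'shred --poll',
    }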
=== added file 'templates/cf-hm9k-api-server.conf'
--- templates/cf-hm9k-api-server.conf 1970-01-01 00:00:00 +0000
+++ templates/cf-hm9k-api-server.conf 2014-06-10 15:56:14 +0000
@@ -0,0 +1,17 @@
+description "Cloud Foundry HM9000"
+author "cf-charmers <cf-charmers@xxxxxxxxxxxxxxxxxxx>"
+start on runlevel [2345]
+stop on runlevel [!2345]
+#expect daemon
+#apparmor load <profile-path>
+setuid vcap
+setgid vcap
+respawn
+respawn limit 10 5
+normal exit 0
+
+env GOPATH=/var/lib/cloudfoundry/hm-workspace/
+export GOPATH
+
+chdir /var/lib/cloudfoundry/hm-workspace
+exec ./bin/hm9000 serve_api --config=/var/lib/cloudfoundry/cfhm9000/config/hm9000.json
=== added file 'templates/cf-hm9k-evacuator.conf'
--- templates/cf-hm9k-evacuator.conf 1970-01-01 00:00:00 +0000
+++ templates/cf-hm9k-evacuator.conf 2014-06-10 15:56:14 +0000
@@ -0,0 +1,17 @@
+description "Cloud Foundry HM9000"
+author "cf-charmers <cf-charmers@xxxxxxxxxxxxxxxxxxx>"
+start on runlevel [2345]
+stop on runlevel [!2345]
+#expect daemon
+#apparmor load <profile-path>
+setuid vcap
+setgid vcap
+respawn
+respawn limit 10 5
+normal exit 0
+
+env GOPATH=/var/lib/cloudfoundry/hm-workspace/
+export GOPATH
+
+chdir /var/lib/cloudfoundry/hm-workspace
+exec ./bin/hm9000 evacuator --config=/var/lib/cloudfoundry/cfhm9000/config/hm9000.json
=== added file 'templates/cf-hm9k-fetcher.conf'
--- templates/cf-hm9k-fetcher.conf 1970-01-01 00:00:00 +0000
+++ templates/cf-hm9k-fetcher.conf 2014-06-10 15:56:14 +0000
@@ -0,0 +1,17 @@
+description "Cloud Foundry HM9000"
+author "cf-charmers <cf-charmers@xxxxxxxxxxxxxxxxxxx>"
+start on runlevel [2345]
+stop on runlevel [!2345]
+#expect daemon
+#apparmor load <profile-path>
+setuid vcap
+setgid vcap
+respawn
+respawn limit 10 5
+normal exit 0
+
+env GOPATH=/var/lib/cloudfoundry/hm-workspace/
+export GOPATH
+
+chdir /var/lib/cloudfoundry/hm-workspace
+exec ./bin/hm9000 fetch_desired --poll --config=/var/lib/cloudfoundry/cfhm9000/config/hm9000.json
=== added file 'templates/cf-hm9k-listener.conf'
--- templates/cf-hm9k-listener.conf 1970-01-01 00:00:00 +0000
+++ templates/cf-hm9k-listener.conf 2014-06-10 15:56:14 +0000
@@ -0,0 +1,17 @@
+description "Cloud Foundry HM9000"
+author "cf-charmers <cf-charmers@xxxxxxxxxxxxxxxxxxx>"
+start on runlevel [2345]
+stop on runlevel [!2345]
+#expect daemon
+#apparmor load <profile-path>
+setuid vcap
+setgid vcap
+respawn
+respawn limit 10 5
+normal exit 0
+
+env GOPATH=/var/lib/cloudfoundry/hm-workspace/
+export GOPATH
+
+chdir /var/lib/cloudfoundry/hm-workspace
+exec ./bin/hm9000 listen --config=/var/lib/cloudfoundry/cfhm9000/config/hm9000.json
=== added file 'templates/cf-hm9k-metrics-server.conf'
--- templates/cf-hm9k-metrics-server.conf 1970-01-01 00:00:00 +0000
+++ templates/cf-hm9k-metrics-server.conf 2014-06-10 15:56:14 +0000
@@ -0,0 +1,17 @@
+description "Cloud Foundry HM9000"
+author "cf-charmers <cf-charmers@xxxxxxxxxxxxxxxxxxx>"
+start on runlevel [2345]
+stop on runlevel [!2345]
+#expect daemon
+#apparmor load <profile-path>
+setuid vcap
+setgid vcap
+respawn
+respawn limit 10 5
+normal exit 0
+
+env GOPATH=/var/lib/cloudfoundry/hm-workspace/
+export GOPATH
+
+chdir /var/lib/cloudfoundry/hm-workspace
+exec ./bin/hm9000 serve_metrics --config=/var/lib/cloudfoundry/cfhm9000/config/hm9000.json
=== added file 'templates/cf-hm9k-sender.conf'
--- templates/cf-hm9k-sender.conf 1970-01-01 00:00:00 +0000
+++ templates/cf-hm9k-sender.conf 2014-06-10 15:56:14 +0000
@@ -0,0 +1,17 @@
+description "Cloud Foundry HM9000"
+author "cf-charmers <cf-charmers@xxxxxxxxxxxxxxxxxxx>"
+start on runlevel [2345]
+stop on runlevel [!2345]
+#expect daemon
+#apparmor load <profile-path>
+setuid vcap
+setgid vcap
+respawn
+respawn limit 10 5
+normal exit 0
+
+env GOPATH=/var/lib/cloudfoundry/hm-workspace/
+export GOPATH
+
+chdir /var/lib/cloudfoundry/hm-workspace
+exec ./bin/hm9000 send --poll --config=/var/lib/cloudfoundry/cfhm9000/config/hm9000.json
=== added file 'templates/cf-hm9k-shredder.conf'
--- templates/cf-hm9k-shredder.conf 1970-01-01 00:00:00 +0000
+++ templates/cf-hm9k-shredder.conf 2014-06-10 15:56:14 +0000
@@ -0,0 +1,17 @@
+description "Cloud Foundry HM9000"
+author "cf-charmers <cf-charmers@xxxxxxxxxxxxxxxxxxx>"
+start on runlevel [2345]
+stop on runlevel [!2345]
+#expect daemon
+#apparmor load <profile-path>
+setuid vcap
+setgid vcap
+respawn
+respawn limit 10 5
+normal exit 0
+
+env GOPATH=/var/lib/cloudfoundry/hm-workspace/
+export GOPATH
+
+chdir /var/lib/cloudfoundry/hm-workspace
+exec ./bin/hm9000 shred --poll --config=/var/lib/cloudfoundry/cfhm9000/config/hm9000.json
=== added file 'templates/hm9000.json'
--- templates/hm9000.json 1970-01-01 00:00:00 +0000
+++ templates/hm9000.json 2014-06-10 15:56:14 +0000
@@ -0,0 +1,31 @@
+{
+  "heartbeat_period_in_seconds": 10,
+
+  "cc_auth_user": "{{cc[0]['user']}}",
+  "cc_auth_password": "{{cc[0]['password']}}",
+  "cc_base_url": "http://{{cc[0]['hostname']}}:{{cc[0]['port']}}",
+  "skip_cert_verify": true,
+  "desired_state_batch_size": 500,
+  "fetcher_network_timeout_in_seconds": 10,
+
+  "store_schema_version": 1,
+  "store_type": "etcd",
+  "store_urls": [
+    {% for unit in etcd -%}
+    "http://{{unit['hostname']}}:{{unit['port']}}"{% if not loop.last %},{% endif -%}
+    {%- endfor %}
+  ],
+
+  "metrics_server_port": 7879,
+  "metrics_server_user": "metrics_server_user",
+  "metrics_server_password": "canHazMetrics?",
+
+  "log_level": "INFO",
+
+  "nats": [{
+    "host": "{{nats[0]['address']}}",
+    "port": {{nats[0]['port']}},
+    "user": "{{nats[0]['user']}}",
+    "password": "{{nats[0]['password']}}"
+  }]
+}
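A quick way to sanity-check this template, particularly the comma handling in the store_urls loop, is to render it with relation data shaped the way the NatsRelation, EtcdRelation, and CloudControllerRelation contexts provide it (the addresses and credentials below are made up):

    import json
    from jinja2 import Template

    context = {
        'cc':   [{'user': 'bulk_api_user', 'password': 'secret',
                  'hostname': '10.0.0.2', 'port': 9022}],
        'etcd': [{'hostname': '10.0.0.3', 'port': 4001},
                 {'hostname': '10.0.0.4', 'port': 4001}],
        'nats': [{'address': '10.0.0.5', 'port': 4222,
                  'user': 'nats', 'password': 'secret'}],
    }

    with open('templates/hm9000.json') as f:
        rendered = Template(f.read()).render(context)
    json.loads(rendered)  # raises ValueError if the output is not valid JSON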