← Back to team overview

bigdata-dev team mailing list archive

[Merge] lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/datanode-blocking into lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk

 

Cory Johns has proposed merging lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/datanode-blocking into lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk.

Requested reviews:
  Juju Big Data Development (bigdata-dev)

For more details, see:
https://code.launchpad.net/~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/datanode-blocking/+merge/267597

Fixed the datanode component being blocked by the nodemanager component, which in turn blocked hdfs-master on yarn-master
Fixed status reporting being inaccurate after relations were removed
-- 
Your team Juju Big Data Development is requested to review the proposed merge of lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/datanode-blocking into lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk.
=== modified file 'hooks/callbacks.py'
--- hooks/callbacks.py	2015-06-24 22:12:57 +0000
+++ hooks/callbacks.py	2015-08-10 23:08:18 +0000
@@ -24,37 +24,50 @@
 def update_blocked_status():
     if unitdata.kv().get('charm.active', False):
         return
-    rels = (
-        ('Yarn', 'ResourceManager', ResourceManagerMaster()),
+    rels = [
         ('HDFS', 'NameNode', NameNodeMaster()),
-    )
+    ]
     missing_rel = [rel for rel, res, impl in rels if not impl.connected_units()]
-    missing_hosts = [rel for rel, res, impl in rels if not impl.am_i_registered()]
-    not_ready = [(rel, res) for rel, res, impl in rels if not impl.is_ready()]
+    rels.append(('Yarn', 'ResourceManager', ResourceManagerMaster()))
+    not_ready = [(rel, res) for rel, res, impl in rels if impl.connected_units() and not impl.is_ready()]
+    missing_hosts = [rel for rel, res, impl in rels if impl.connected_units() and not impl.am_i_registered()]
     if missing_rel:
         hookenv.status_set('blocked', 'Waiting for relation to %s master%s' % (
             ' and '.join(missing_rel),
             's' if len(missing_rel) > 1 else '',
         )),
-    elif missing_hosts:
-        hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (
-            ' and '.join(missing_hosts),
-        ))
     elif not_ready:
         unready_rels, unready_ress = zip(*not_ready)
         hookenv.status_set('waiting', 'Waiting for %s to provide %s' % (
             ' and '.join(unready_rels),
             ' and '.join(unready_ress),
         ))
+    elif missing_hosts:
+        hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (
+            ' and '.join(missing_hosts),
+        ))
 
 
 def update_working_status():
     if unitdata.kv().get('charm.active', False):
         hookenv.status_set('maintenance', 'Updating configuration')
         return
-    hookenv.status_set('maintenance', 'Setting up NodeManager and DataNode')
+    yarn_connected = ResourceManagerMaster().connected_units()
+    hookenv.status_set('maintenance', 'Setting up DataNode%s' % (
+        ' and NodeManager' if yarn_connected else '',
+    ))
 
 
 def update_active_status():
-    unitdata.kv().set('charm.active', True)
-    hookenv.status_set('active', 'Ready')
+    hdfs_ready = NameNodeMaster().is_ready()
+    yarn_connected = ResourceManagerMaster().connected_units()
+    yarn_ready = ResourceManagerMaster().is_ready()
+    if hdfs_ready and (not yarn_connected or yarn_ready):
+        unitdata.kv().set('charm.active', True)
+        hookenv.status_set('active', 'Ready')
+    else:
+        update_blocked_status()
+
+
+def clear_active_flag():
+    unitdata.kv().set('charm.active', False)

=== modified file 'hooks/common.py'
--- hooks/common.py	2015-06-24 22:12:57 +0000
+++ hooks/common.py	2015-08-10 23:08:18 +0000
@@ -71,40 +71,61 @@
             ],
         },
         {
-            'name': 'compute-slave',
+            'name': 'datanode',
             'provides': [
                 jujubigdata.relations.DataNode(),
+            ],
+            'requires': [
+                hadoop.is_installed,
+                hdfs_relation,
+                hdfs_relation.am_i_registered,
+            ],
+            'callbacks': [
+                callbacks.update_working_status,
+                hdfs_relation.register_provided_hosts,
+                jujubigdata.utils.manage_etc_hosts,
+                hdfs_relation.install_ssh_keys,
+                hdfs.configure_datanode,
+                hdfs.start_datanode,
+                charmframework.helpers.open_ports(
+                    dist_config.exposed_ports('compute-slave-hdfs')),
+                callbacks.update_active_status,
+            ],
+            'cleanup': [
+                callbacks.clear_active_flag,
+                charmframework.helpers.close_ports(
+                    dist_config.exposed_ports('compute-slave-hdfs')),
+                hdfs.stop_datanode,
+                callbacks.update_blocked_status,
+            ],
+        },
+        {
+            'name': 'nodemanager',
+            'provides': [
                 jujubigdata.relations.NodeManager(),
             ],
             'requires': [
                 hadoop.is_installed,
-                hdfs_relation,
                 yarn_relation,
-                hdfs_relation.am_i_registered,
                 yarn_relation.am_i_registered,
             ],
             'callbacks': [
                 callbacks.update_working_status,
-                hdfs_relation.register_provided_hosts,
                 yarn_relation.register_provided_hosts,
                 jujubigdata.utils.manage_etc_hosts,
-                hdfs_relation.install_ssh_keys,
                 yarn_relation.install_ssh_keys,
-                hdfs.configure_datanode,
                 yarn.configure_nodemanager,
-                hdfs.start_datanode,
                 yarn.start_nodemanager,
                 charmframework.helpers.open_ports(
-                    dist_config.exposed_ports('compute-slave-hdfs') +
                     dist_config.exposed_ports('compute-slave-yarn')),
                 callbacks.update_active_status,
             ],
             'cleanup': [
+                callbacks.clear_active_flag,
                 charmframework.helpers.close_ports(
-                    dist_config.exposed_ports('compute-slave-hdfs') +
                     dist_config.exposed_ports('compute-slave-yarn')),
-                hdfs.stop_datanode,
                 yarn.stop_nodemanager,
+                callbacks.update_active_status,  # might still be active for HDFS-only
             ],
         },
     ])

=== added file 'hooks/datanode-relation-departed'
--- hooks/datanode-relation-departed	1970-01-01 00:00:00 +0000
+++ hooks/datanode-relation-departed	2015-08-10 23:08:18 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import common
+
+common.manage()

=== added file 'hooks/nodemanager-relation-departed'
--- hooks/nodemanager-relation-departed	1970-01-01 00:00:00 +0000
+++ hooks/nodemanager-relation-departed	2015-08-10 23:08:18 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import common
+
+common.manage()


Follow ups