
bigdata-dev team mailing list archive

[Merge] lp:~bigdata-dev/charms/trusty/apache-hive/trunk into lp:charms/trusty/apache-hive

Cory Johns has proposed merging lp:~bigdata-dev/charms/trusty/apache-hive/trunk into lp:charms/trusty/apache-hive.

Requested reviews:
  charmers (charmers)

For more details, see:
https://code.launchpad.net/~bigdata-dev/charms/trusty/apache-hive/trunk/+merge/273615

Test cleanups for CWR (Cloud Weather Report), and move the charm's binary resources to S3
-- 
Your team Juju Big Data Development is subscribed to branch lp:~bigdata-dev/charms/trusty/apache-hive/trunk.
=== modified file 'README.md'
--- README.md	2015-08-24 23:23:36 +0000
+++ README.md	2015-10-06 20:46:56 +0000
@@ -84,7 +84,7 @@
 
 ## Contact Information
 
-- <bigdata-dev@xxxxxxxxxxxxxxxxxxx>
+- <bigdata@xxxxxxxxxxxxxxxx>
 
 
 ## Help

=== modified file 'resources.yaml'
--- resources.yaml	2015-08-24 23:23:36 +0000
+++ resources.yaml	2015-10-06 20:46:56 +0000
@@ -7,10 +7,10 @@
     pypi: jujubigdata>=4.0.0,<5.0.0
 optional_resources:
   hive-ppc64le:
-    url: https://git.launchpad.net/bigdata-data/plain/apache/ppc64le/apache-hive-0.13.0-bin.tar.gz?id=c34a21c939f5fce9ab89b95d65fe2df50e7bbab0
+    url: https://s3.amazonaws.com/jujubigdata/apache/ppc64le/apache-hive-0.13.0-bin-4c83564.tar.gz
     hash: 4c835644eb72a08df059b86c45fb159b95df08e831334cb57e24654ef078e7ee
     hash_type: sha256
   hive-x86_64:
-    url: https://git.launchpad.net/bigdata-data/plain/apache/x86_64/apache-hive-1.0.0-bin.tar.gz?id=c34a21c939f5fce9ab89b95d65fe2df50e7bbab0
+    url: https://s3.amazonaws.com/jujubigdata/apache/x86_64/apache-hive-1.0.0-bin-b8e121f.tar.gz
     hash: b8e121f435defeb94d810eb6867d2d1c27973e4a3b4099f2716dbffafb274184
     hash_type: sha256
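
For anyone reviewing the relocation, the pinned sha256 values above make the
new S3 URLs easy to spot-check. A minimal Python 3 sketch (URL and hash copied
from the x86_64 entry in this hunk):

    import hashlib
    import urllib.request

    url = ('https://s3.amazonaws.com/jujubigdata/apache/x86_64/'
           'apache-hive-1.0.0-bin-b8e121f.tar.gz')
    expected = 'b8e121f435defeb94d810eb6867d2d1c27973e4a3b4099f2716dbffafb274184'

    sha = hashlib.sha256()
    with urllib.request.urlopen(url) as resp:
        # Stream the tarball so the whole file is never held in memory
        for chunk in iter(lambda: resp.read(1 << 16), b''):
            sha.update(chunk)
    print('OK' if sha.hexdigest() == expected else 'MISMATCH')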

=== removed file 'tests/00-setup'
--- tests/00-setup	2015-09-16 21:43:12 +0000
+++ tests/00-setup	1970-01-01 00:00:00 +0000
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-if ! dpkg -s amulet &> /dev/null; then
-    echo Installing Amulet...
-    sudo add-apt-repository -y ppa:juju/stable
-    sudo apt-get update
-    sudo apt-get -y install amulet
-fi
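
(The amulet bootstrap this script performed is now handled by bundletester via
the packages list added to tests/tests.yaml at the end of this diff.)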

=== added file 'tests/01-basic-deployment.py'
--- tests/01-basic-deployment.py	1970-01-01 00:00:00 +0000
+++ tests/01-basic-deployment.py	2015-10-06 20:46:56 +0000
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+
+import unittest
+import amulet
+
+
+class TestDeploy(unittest.TestCase):
+    """
+    Trivial deployment test for Apache Hive.
+
+    This charm cannot do anything useful by itself, so integration testing
+    is done in the bundle.
+    """
+
+    def test_deploy(self):
+        self.d = amulet.Deployment(series='trusty')
+        self.d.add('hive', 'apache-hive')
+        self.d.setup(timeout=900)
+        self.d.sentry.wait(timeout=1800)
+        self.unit = self.d.sentry['hive'][0]
+
+
+if __name__ == '__main__':
+    unittest.main()
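
The new test deliberately stops at a successful deploy; per its docstring,
integration coverage lives in the bundle. If a standalone follow-up check is
ever wanted, a minimal sketch using the same amulet API (deployment hoisted
into setUpClass so multiple tests can share the unit; the HiveServer2 check
mirrors the removed 100-deploy-hive-mysql test below):

    @classmethod
    def setUpClass(cls):
        cls.d = amulet.Deployment(series='trusty')
        cls.d.add('hive', 'apache-hive')
        cls.d.setup(timeout=900)
        cls.d.sentry.wait(timeout=1800)
        cls.unit = cls.d.sentry['hive'][0]

    def test_hiveserver2_running(self):
        # run() returns (output, exit_code) on the deployed unit
        output, code = self.unit.run(
            "su hive -c 'pgrep -a java | grep HiveServer2'")
        self.assertIn('HiveServer2', output, 'HiveServer2 not running')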

=== removed file 'tests/100-deploy-hive-mysql'
--- tests/100-deploy-hive-mysql	2015-09-17 14:17:43 +0000
+++ tests/100-deploy-hive-mysql	1970-01-01 00:00:00 +0000
@@ -1,100 +0,0 @@
-#!/usr/bin/env python3
-import unittest
-import amulet
-
-
-class TestDeploy(unittest.TestCase):
-    """
-    Deployment test for Apache Hive using HDFS as shared storage and YARN as
-    cluster job manager.
-    """
-
-    @classmethod
-    def setUpClass(cls):
-        cls.d = amulet.Deployment(series='trusty')
-        # Deploy a hadoop cluster
-        cls.d.add('yarn-master', charm='cs:trusty/apache-hadoop-yarn-master')
-        cls.d.add('hdfs-master', charm='cs:trusty/apache-hadoop-hdfs-master')
-        cls.d.add('compute-slave', charm='cs:trusty/apache-hadoop-compute-slave')
-        cls.d.add('plugin', charm='cs:trusty/apache-hadoop-plugin')
-        cls.d.relate('yarn-master:namenode', 'hdfs-master:namenode')
-        cls.d.relate('compute-slave:nodemanager', 'yarn-master:nodemanager')
-        cls.d.relate('compute-slave:datanode', 'hdfs-master:datanode')
-        cls.d.relate('plugin:resourcemanager', 'yarn-master:resourcemanager')
-        cls.d.relate('plugin:namenode', 'hdfs-master:namenode')
-
-        # Add MySQL service (hive needs binlog-format config)
-        cls.d.add('mysql', 'cs:trusty/mysql')
-        cls.d.configure('mysql', {'binlog-format': 'row'})
-
-        # Add Hive service
-        cls.d.add('hive', charm='cs:trusty/apache-hive')
-        cls.d.relate('hive:db', 'mysql:db')
-        cls.d.relate('hive:hadoop-plugin', 'plugin:hadoop-plugin')
-
-        cls.d.setup(timeout=3600)
-        cls.d.sentry.wait(timeout=3600)
-        cls.unit = cls.d.sentry.unit['hive/0']
-
-
-###########################################################################
-# Validate yarn mapreduce operation from the Hive node
-# 1) validate mapreduce execution - writing to hdfs
-###########################################################################
-    def test_yarn_mapreduce_exe1(self):
-        outdir = "/user/ubuntu/teragenout"
-        o, c = self.unit.run("su ubuntu -c 'hdfs dfs -rm -f -R %s &&"
-                             "hadoop jar /usr/lib/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples*.jar teragen 10000 %s'"
-                             % (outdir, outdir))
-        assert c == 0, "Teragen failed to execute: %s" % o
-
-###########################################################################
-# 2) validate successful mapreduce operation after the execution
-###########################################################################
-        o, c = self.unit.run("su hdfs -c 'hdfs dfs -ls %s/_SUCCESS'" % outdir)
-        assert c == 0, "Teragen executed, but expected output was not found: %s" % o
-
-###########################################################################
-# Validate mapreduce operation from Hive node - validates job chain operation
-# 1) validate mapreduce execution - reading and writing to hdfs
-###########################################################################
-    def test_yarn_mapreduce_exe2(self):
-        indir = "/user/ubuntu/teragenout"
-        outdir = "/user/ubuntu/terasortout"
-        o, c = self.unit.run("su ubuntu -c 'hdfs dfs -rm -f -R %s &&"
-                             "hadoop jar /usr/lib/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples*.jar terasort %s %s'"
-                             % (outdir, indir, outdir))
-        assert c == 0, "Terasort failed to execute: %s" % o
-
-###########################################################################
-# 2) validate a successful mapreduce operation after the execution
-###########################################################################
-        o, c = self.unit.run("su hdfs -c 'hdfs dfs -ls %s/_SUCCESS'" % outdir)
-        assert c == 0, "Terasort executed, but expected output was not found: %s" % o
-
-###########################################################################
-# Validate the service is running
-# Validate the JVM
-###########################################################################
-    def test_jvm_status(self):
-        o, c = self.unit.run("su hive -c 'pgrep -a java | grep HiveServer2'")
-        assert "HiveServer2" in o, "HiveServer2 not running"
-
-###########################################################################
-# Validate HIVE command line operation - create a HIVE table
-###########################################################################
-    def test_hive_create_tables(self):
-        o, c = self.unit.run("su hive -c \"hive -e 'create table test(col1 int, col2 string);' 2>&1\"")
-        assert c == 0, "Hive create table failed: %s" % o
-
-###########################################################################
-# Validate HIVE commandline operation - show HIVE tables
-###########################################################################
-    def test_hive_show_tables(self):
-        o, c = self.unit.run("su hive -c \"hive -e \'show tables;\' 2>&1\"")
-        # Look for the 'test' table from our previous test
-        assert "test" in o, "Hive show tables failed: %s" % o
-
-
-if __name__ == '__main__':
-    unittest.main()

=== removed directory 'tests/remote'
=== removed file 'tests/remote/test_dist_config.py'
--- tests/remote/test_dist_config.py	2015-08-21 21:51:45 +0000
+++ tests/remote/test_dist_config.py	1970-01-01 00:00:00 +0000
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-
-import grp
-import os
-import pwd
-import unittest
-
-import jujubigdata
-
-
-class TestDistConfig(unittest.TestCase):
-    """
-    Test that the ``dist.yaml`` settings were applied properly, such as users, groups, and dirs.
-
-    This is done as a remote test on the deployed unit rather than a regular
-    test under ``tests/`` because filling in the ``dist.yaml`` requires Juju
-    context (e.g., config).
-    """
-    @classmethod
-    def setUpClass(cls):
-        config = None
-        config_dir = os.environ['JUJU_CHARM_DIR']
-        config_file = 'dist.yaml'
-        if os.path.isfile(os.path.join(config_dir, config_file)):
-            config = os.path.join(config_dir, config_file)
-        if not config:
-            raise IOError('Could not find {} in {}'.format(config_file, config_dir))
-        reqs = ['vendor', 'hadoop_version', 'packages', 'groups', 'users',
-                'dirs', 'ports']
-        cls.dist_config = jujubigdata.utils.DistConfig(config, reqs)
-
-    def test_groups(self):
-        for name in self.dist_config.groups:
-            try:
-                grp.getgrnam(name)
-            except KeyError:
-                self.fail('Group {} is missing'.format(name))
-
-    def test_users(self):
-        for username, details in self.dist_config.users.items():
-            try:
-                user = pwd.getpwnam(username)
-            except KeyError:
-                self.fail('User {} is missing'.format(username))
-            for groupname in details['groups']:
-                try:
-                    group = grp.getgrnam(groupname)
-                except KeyError:
-                    self.fail('Group {} referenced by user {} does not exist'.format(
-                        groupname, username))
-                if group.gr_gid != user.pw_gid:
-                    self.assertIn(username, group.gr_mem, 'User {} not in group {}'.format(
-                        username, groupname))
-
-    def test_dirs(self):
-        for name, details in self.dist_config.dirs.items():
-            dirpath = self.dist_config.path(name)
-            self.assertTrue(dirpath.isdir(), 'Dir {} is missing'.format(name))
-            stat = dirpath.stat()
-            owner = pwd.getpwuid(stat.st_uid).pw_name
-            group = grp.getgrgid(stat.st_gid).gr_name
-            perms = stat.st_mode & ~0o40000
-            self.assertEqual(owner, details.get('owner', 'root'),
-                             'Dir {} ({}) has wrong owner: {}'.format(name, dirpath, owner))
-            self.assertEqual(group, details.get('group', 'root'),
-                             'Dir {} ({}) has wrong group: {}'.format(name, dirpath, group))
-            self.assertEqual(perms, details.get('perms', 0o755),
-                             'Dir {} ({}) has wrong perms: 0o{:o}'.format(name, dirpath, perms))
-
-
-if __name__ == '__main__':
-    unittest.main()

=== modified file 'tests/tests.yaml'
--- tests/tests.yaml	2015-06-25 15:00:26 +0000
+++ tests/tests.yaml	2015-10-06 20:46:56 +0000
@@ -1,10 +1,3 @@
-# Driver for bundletester: https://github.com/juju-solutions/bundletester
-#
-# It may be useful to alter the defaults during manual testing. For example,
-# set 'reset: false' to reuse existing charms instead of redeploying them.
-
-# Allow bootstrap of current env, default: true
-bootstrap: true
-
-# Use juju-deployer to reset env between test, default: true
-reset: true
+reset: false
+packages:
+  - amulet
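
With bundletester driving the suite (see the link in the removed comment
block), the packages key lists apt packages to install on the test host, which
is what makes the removed tests/00-setup script redundant. Setting
reset: false reuses the existing deployed charms instead of resetting the
environment between tests, as the old comments suggested for manual runs. The
suite would then be invoked with something like bundletester -t <charm-dir>
(invocation assumed from the bundletester README).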

