bigdata-dev team mailing list archive

[Merge] lp:~bigdata-dev/charms/trusty/apache-zeppelin/trunk into lp:charms/trusty/apache-zeppelin

 

Cory Johns has proposed merging lp:~bigdata-dev/charms/trusty/apache-zeppelin/trunk into lp:charms/trusty/apache-zeppelin.

Requested reviews:
  charmers (charmers)

For more details, see:
https://code.launchpad.net/~bigdata-dev/charms/trusty/apache-zeppelin/trunk/+merge/273429

Remove trivial test in favor of bundle tests.
-- 
Your team Juju Big Data Development is subscribed to branch lp:~bigdata-dev/charms/trusty/apache-zeppelin/trunk.
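For reference, a bundle-level test of the kind that supersedes the per-charm test removed below would typically load a bundle definition instead of wiring up each charm by hand. The following is only an illustrative sketch, not code from this merge proposal: the bundle.yaml path and the presence of a 'zeppelin' service in that bundle are assumptions.

#!/usr/bin/env python3
# Illustrative sketch only -- not part of this merge proposal.
# Assumes a deployer-format bundle.yaml describing the hadoop + spark +
# zeppelin topology lives alongside the bundle's own tests.

import unittest
import yaml
import amulet


class TestBundle(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.d = amulet.Deployment(series='trusty')
        # Load the whole bundle rather than add()/relate() each charm.
        with open('bundle.yaml') as f:
            cls.d.load(yaml.safe_load(f))
        cls.d.setup(timeout=3600)
        cls.d.sentry.wait(timeout=3600)
        cls.unit = cls.d.sentry.unit['zeppelin/0']

    def test_zeppelin_status(self):
        # Same check as the removed per-charm test: the Zeppelin daemon runs.
        output, code = self.unit.run("pgrep -a java | grep zeppelin")
        self.assertEqual(code, 0, "Zeppelin daemon not running")


if __name__ == '__main__':
    unittest.main()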
=== removed directory 'tests'
=== removed file 'tests/00-setup'
--- tests/00-setup	2015-09-16 20:23:04 +0000
+++ tests/00-setup	1970-01-01 00:00:00 +0000
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-if ! dpkg -s amulet &> /dev/null; then
-    echo Installing Amulet...
-    sudo add-apt-repository -y ppa:juju/stable
-    sudo apt-get update
-    sudo apt-get -y install amulet
-fi

=== removed file 'tests/100-deploy-spark-hdfs-yarn'
--- tests/100-deploy-spark-hdfs-yarn	2015-09-22 03:48:01 +0000
+++ tests/100-deploy-spark-hdfs-yarn	1970-01-01 00:00:00 +0000
@@ -1,62 +0,0 @@
-#!/usr/bin/env python3
-
-import unittest
-import amulet
-
-
-class TestDeploy(unittest.TestCase):
-    """
-    Deployment test for Apache Spark+Zeppelin using HDFS as shared storage
-    and YARN as cluster job manager.
-    """
-
-    @classmethod
-    def setUpClass(cls):
-        cls.d = amulet.Deployment(series='trusty')
-        # Deploy a hadoop cluster
-        cls.d.add('yarn-master', charm='cs:trusty/apache-hadoop-yarn-master')
-        cls.d.add('hdfs-master', charm='cs:trusty/apache-hadoop-hdfs-master')
-        cls.d.add('compute-slave', charm='cs:trusty/apache-hadoop-compute-slave', units=3)
-        cls.d.add('plugin', charm='cs:trusty/apache-hadoop-plugin')
-        cls.d.relate('yarn-master:namenode', 'hdfs-master:namenode')
-        cls.d.relate('compute-slave:nodemanager', 'yarn-master:nodemanager')
-        cls.d.relate('compute-slave:datanode', 'hdfs-master:datanode')
-        cls.d.relate('plugin:resourcemanager', 'yarn-master:resourcemanager')
-        cls.d.relate('plugin:namenode', 'hdfs-master:namenode')
-
-        # Add Spark Service
-        cls.d.add('spark', charm='cs:trusty/apache-spark')
-        cls.d.relate('spark:hadoop-plugin', 'plugin:hadoop-plugin')
-
-        # Add Apache Zeppelin
-        cls.d.add('zeppelin', charm='cs:trusty/apache-zeppelin')
-        cls.d.relate('zeppelin:spark', 'spark:spark')
-
-        cls.d.setup(timeout=3600)
-        cls.d.sentry.wait(timeout=3600)
-        cls.unit = cls.d.sentry.unit['spark/0']
-
-###########################################################################
-# Validate that the Spark HistoryServer is running
-###########################################################################
-    def test_spark_status(self):
-        o, c = self.unit.run("pgrep -a java | grep HistoryServer")
-        assert c == 0, "Spark HistoryServer not running"
-
-###########################################################################
-# Validate that the Zeppelin process is running
-###########################################################################
-    def test_zeppelin_status(self):
-        o, c = self.unit.run("pgrep -a java | grep zeppelin")
-        assert c == 0, "Zeppelin daemon not running"
-
-###########################################################################
-# Validate Spark commandline operation - run SparkPi
-###########################################################################
-    def test_spark_job(self):
-        o, c = self.unit.run("su ubuntu -c '/home/ubuntu/sparkpi.sh'")
-        assert c == 0, "SparkPi test failed: %s" % o
-
-
-if __name__ == '__main__':
-    unittest.main()

=== removed directory 'tests/remote'
=== removed file 'tests/remote/test_dist_config.py'
--- tests/remote/test_dist_config.py	2015-08-21 21:52:10 +0000
+++ tests/remote/test_dist_config.py	1970-01-01 00:00:00 +0000
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-
-import grp
-import os
-import pwd
-import unittest
-
-from charmhelpers.contrib import bigdata
-
-
-class TestDistConfig(unittest.TestCase):
-    """
-    Test that the ``dist.yaml`` settings were applied properly, such as users, groups, and dirs.
-
-    This is done as a remote test on the deployed unit rather than a regular
-    test under ``tests/`` because filling in the ``dist.yaml`` requires Juju
-    context (e.g., config).
-    """
-    @classmethod
-    def setUpClass(cls):
-        config = None
-        config_dir = os.environ['JUJU_CHARM_DIR']
-        config_file = 'dist.yaml'
-        if os.path.isfile(os.path.join(config_dir, config_file)):
-            config = os.path.join(config_dir, config_file)
-        if not config:
-            raise IOError('Could not find {} in {}'.format(config_file, config_dir))
-        reqs = ['vendor', 'hadoop_version', 'packages', 'groups', 'users',
-                'dirs', 'ports']
-        cls.dist_config = bigdata.utils.DistConfig(config, reqs)
-
-    def test_groups(self):
-        for name in self.dist_config.groups:
-            try:
-                grp.getgrnam(name)
-            except KeyError:
-                self.fail('Group {} is missing'.format(name))
-
-    def test_users(self):
-        for username, details in self.dist_config.users.items():
-            try:
-                user = pwd.getpwnam(username)
-            except KeyError:
-                self.fail('User {} is missing'.format(username))
-            for groupname in details['groups']:
-                try:
-                    group = grp.getgrnam(groupname)
-                except KeyError:
-                    self.fail('Group {} referenced by user {} does not exist'.format(
-                        groupname, username))
-                if group.gr_gid != user.pw_gid:
-                    self.assertIn(username, group.gr_mem, 'User {} not in group {}'.format(
-                        username, groupname))
-
-    def test_dirs(self):
-        for name, details in self.dist_config.dirs.items():
-            dirpath = self.dist_config.path(name)
-            self.assertTrue(dirpath.isdir(), 'Dir {} is missing'.format(name))
-            stat = dirpath.stat()
-            owner = pwd.getpwuid(stat.st_uid).pw_name
-            group = grp.getgrgid(stat.st_gid).gr_name
-            perms = stat.st_mode & ~0o40000
-            self.assertEqual(owner, details.get('owner', 'root'),
-                             'Dir {} ({}) has wrong owner: {}'.format(name, dirpath, owner))
-            self.assertEqual(group, details.get('group', 'root'),
-                             'Dir {} ({}) has wrong group: {}'.format(name, dirpath, group))
-            self.assertEqual(perms, details.get('perms', 0o755),
-                             'Dir {} ({}) has wrong perms: 0o{:o}'.format(name, dirpath, perms))
-
-
-if __name__ == '__main__':
-    unittest.main()

=== removed file 'tests/tests.yaml'
--- tests/tests.yaml	2015-06-26 16:46:56 +0000
+++ tests/tests.yaml	1970-01-01 00:00:00 +0000
@@ -1,10 +0,0 @@
-# Driver for bundletester: https://github.com/juju-solutions/bundletester
-#
-# It may be useful to alter the defaults during manual testing. For example,
-# set 'reset: false' to reuse existing charms instead of redeploying them.
-
-# Allow bootstrap of current env, default: true
-bootstrap: true
-
-# Use juju-deployer to reset env between test, default: true
-reset: true

