← Back to team overview

launchpad-reviewers team mailing list archive

[Merge] lp:~allenap/maas/database-run into lp:maas

 

Gavin Panella has proposed merging lp:~allenap/maas/database-run into lp:maas with lp:~allenap/maas/convert-enums-redux as a prerequisite.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~allenap/maas/database-run/+merge/104759

This moves the management of the database cluster into a tested Python module. I attempted to use van.pg, but it wasn't quite suitable; the exposed API was too restrictive.

The database (including the cluster) is also now managed as a service, but to support testing and other uses, the new bin/database command can also be used directly to launch a shell, get status, stop the cluster, or destroy it. Each user of the cluster also takes out a lock on the cluster, so that requests to stop it or destroy it while it is in use are refused.

-- 
https://code.launchpad.net/~allenap/maas/database-run/+merge/104759
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~allenap/maas/database-run into lp:maas.
=== modified file 'Makefile'
--- Makefile	2012-05-04 15:16:51 +0000
+++ Makefile	2012-05-04 15:16:51 +0000
@@ -7,6 +7,7 @@
 
 build: \
     bin/buildout \
+    bin/database \
     bin/maas bin/test.maas \
     bin/twistd.pserv bin/test.pserv \
     bin/twistd.txlongpoll \
@@ -19,6 +20,10 @@
 	$(python) bootstrap.py --distribute --setup-source distribute_setup.py
 	@touch --no-create $@  # Ensure it's newer than its dependencies.
 
+bin/database: bin/buildout buildout.cfg versions.cfg setup.py
+	bin/buildout install database
+	@touch --no-create $@
+
 bin/maas: bin/buildout buildout.cfg versions.cfg setup.py $(js_enums)
 	bin/buildout install maas
 	@touch --no-create $@
@@ -51,9 +56,6 @@
 	bin/buildout install repl
 	@touch --no-create bin/py bin/ipy
 
-dev-db:
-	utilities/maasdb start ./db/ disposable
-
 test: bin/test.maas bin/test.pserv $(js_enums)
 	bin/test.maas
 	bin/test.pserv
@@ -97,7 +99,6 @@
 	$(RM) $(js_enums)
 
 distclean: clean stop
-	utilities/maasdb delete-cluster ./db/
 	$(RM) -r eggs develop-eggs
 	$(RM) -r bin build dist logs/* parts
 	$(RM) tags TAGS .installed.cfg
@@ -107,10 +108,13 @@
 	$(RM) -r run/* services/*/supervise
 	$(RM) twisted/plugins/dropin.cache
 
-harness: bin/maas dev-db
+harness: bin/maas services/database/@start
 	bin/maas shell --settings=maas.demo
 
-syncdb: bin/maas dev-db
+dbharness: bin/database
+	bin/database shell --preserve
+
+syncdb: bin/maas services/database/@start
 	bin/maas syncdb --noinput
 	bin/maas migrate maasserver --noinput
 	bin/maas migrate metadataserver --noinput
@@ -119,7 +123,7 @@
   build
   check
   clean
-  dev-db
+  dbharness
   distclean
   doc
   enums
@@ -136,7 +140,7 @@
 # Development services.
 #
 
-service_names := pserv reloader txlongpoll web webapp
+service_names := database pserv reloader txlongpoll web webapp
 services := $(patsubst %,services/%/,$(service_names))
 
 run:
@@ -209,6 +213,8 @@
 
 # Dependencies for individual services.
 
+services/database/@deps: bin/database
+
 services/pserv/@deps: bin/twistd.pserv
 
 services/reloader/@deps:
@@ -217,7 +223,7 @@
 
 services/web/@deps:
 
-services/webapp/@deps: bin/maas dev-db
+services/webapp/@deps: bin/maas
 
 #
 # Phony stuff.

=== modified file 'buildout.cfg'
--- buildout.cfg	2012-04-18 15:14:17 +0000
+++ buildout.cfg	2012-05-04 15:16:51 +0000
@@ -41,7 +41,6 @@
   # Convenient developer dependencies
   Jinja2
   Pygments
-  pyinotify
   Sphinx
   docutils
   lxml
@@ -53,8 +52,6 @@
 extra-paths =
   ${buildout:directory}/src
   ${buildout:directory}
-dev-eggs =
-  pyinotify
 test-eggs =
   coverage
   fixtures
@@ -66,19 +63,31 @@
   testresources
   testtools
 
+[database]
+recipe = z3c.recipe.scripts
+eggs =
+  fixtures
+  psycopg2
+extra-paths = ${common:extra-paths}
+interpreter =
+entry-points = database=maastesting.services.database:main
+
 [maas]
 recipe = zc.recipe.egg
 # avahi and dbus should be listed as eggs
 # but they don't have links on PyPI and that makes buildout really
 # unhappy. It refuses to see them, even if they are in site-packages :-(
 # We rely on them being installed through system packages instead.
-eggs =
-  ${common:dev-eggs}
-  ${common:test-eggs}
-  convoy
-  django
+dev-eggs =
   django-debug-toolbar
+test-eggs =
+  ${common:test-eggs}
   django-nose
+eggs =
+  ${maas:dev-eggs}
+  ${maas:test-eggs}
+  convoy
+  django
   django-piston
   docutils
   oauth
@@ -115,7 +124,6 @@
 [pserv]
 recipe = zc.recipe.egg
 eggs =
-  ${common:dev-eggs}
   formencode
   oops-datedir-repo
   oops-twisted

=== modified file 'qa/scripts/prepare-for-juju-with-vdenv'
--- qa/scripts/prepare-for-juju-with-vdenv	2012-03-22 12:31:16 +0000
+++ qa/scripts/prepare-for-juju-with-vdenv	2012-05-04 15:16:51 +0000
@@ -37,7 +37,7 @@
     --username "${LOGNAME}" --password test \
     --email "${LOGNAME}@example.com"
 bin/maas reconcile
-utilities/maasdb shell db <<'EOF'
+make dbharness <<'EOF'
 UPDATE maasserver_node
    SET owner_id = NULL, status = 4
  WHERE hostname LIKE 'odev-node%';

=== added directory 'services/database'
=== added file 'services/database/down'
=== added file 'services/database/run'
--- services/database/run	1970-01-01 00:00:00 +0000
+++ services/database/run	2012-05-04 15:16:51 +0000
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Copyright 2012 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+# Exit immediately if a command exits with a non-zero status.
+set -o errexit
+# Treat unset variables as an error when substituting.
+set -o nounset
+
+# Move to the project root.
+cd "$(dirname "$0")/../.."
+
+# Start logging, if requested. Not using multilog here right now
+# because there are race issues when restarting.
+[ -z "${logdir:-}" ] || exec &>> "${logdir}/current"
+
+# Start the database.
+script="$(readlink -f bin/database)"
+exec "${script}" run --preserve

=== modified file 'src/maas/demo.py'
--- src/maas/demo.py	2012-04-18 10:51:03 +0000
+++ src/maas/demo.py	2012-05-04 15:16:51 +0000
@@ -11,7 +11,7 @@
 
 __metaclass__ = type
 
-import os
+from os.path import abspath
 
 from maas import (
     development,
@@ -27,7 +27,7 @@
 import_settings(settings)
 import_settings(development)
 
-MEDIA_ROOT = os.path.join(os.getcwd(), "media/demo")
+MEDIA_ROOT = abspath("media/demo")
 
 MIDDLEWARE_CLASSES += (
     'debug_toolbar.middleware.DebugToolbarMiddleware',
@@ -39,7 +39,7 @@
 # For demo purposes, use a real provisioning server.
 USE_REAL_PSERV = True
 
-MAAS_CLI = os.path.join(os.getcwd(), 'bin', 'maas')
+MAAS_CLI = abspath("bin/maas")
 
 RABBITMQ_PUBLISH = True
 

=== modified file 'src/maas/development.py'
--- src/maas/development.py	2012-04-16 10:00:51 +0000
+++ src/maas/development.py	2012-05-04 15:16:51 +0000
@@ -12,7 +12,7 @@
 __metaclass__ = type
 
 import logging
-import os
+from os.path import abspath
 
 from maas import (
     import_local_settings,
@@ -58,13 +58,13 @@
         'NAME': 'maas',
         # For PostgreSQL, a "hostname" starting with a slash indicates a
         # Unix socket directory.
-        'HOST': '%s/db' % os.getcwd(),
+        'HOST': abspath('db'),
     }
 }
 
 # Absolute filesystem path to the directory that will hold user-uploaded files.
 # Example: "/home/media/media.lawrence.com/media/"
-MEDIA_ROOT = os.path.join(os.getcwd(), "media/development")
+MEDIA_ROOT = abspath("media/development")
 
 INSTALLED_APPS += (
     'django.contrib.admin',

=== added file 'src/maasserver/management/commands/dbshell.py'
--- src/maasserver/management/commands/dbshell.py	1970-01-01 00:00:00 +0000
+++ src/maasserver/management/commands/dbshell.py	2012-05-04 15:16:51 +0000
@@ -0,0 +1,35 @@
+# Copyright 2012 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Django command: start a database shell.
+
+Overrides the default implementation.
+"""
+
+from __future__ import (
+    absolute_import,
+    print_function,
+    unicode_literals,
+    )
+
+__metaclass__ = type
+__all__ = ['Command']
+
+from django.core.management.commands import dbshell
+from django.db import connections, DEFAULT_DB_ALIAS
+
+from maastesting.services.database import ClusterFixture
+
+
+class Command(dbshell.Command):
+    """Customized "dbshell" command."""
+
+    def handle(self, **options):
+        # Don't call up to Django's dbshell, because that ends up exec'ing the
+        # shell, preventing this from clearing down the fixture.
+        connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
+        datadir = connection.settings_dict["HOST"]
+        with ClusterFixture(datadir, preserve=True) as cluster:
+            dbname = connection.settings_dict["NAME"]
+            cluster.createdb(dbname)
+            cluster.shell(dbname)

=== removed file 'src/maasserver/management/commands/deletedb.py'
--- src/maasserver/management/commands/deletedb.py	2012-04-16 10:00:51 +0000
+++ src/maasserver/management/commands/deletedb.py	1970-01-01 00:00:00 +0000
@@ -1,31 +0,0 @@
-# Copyright 2012 Canonical Ltd.  This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""Django command: stop and delete the local database cluster."""
-
-from __future__ import (
-    absolute_import,
-    print_function,
-    unicode_literals,
-    )
-
-__metaclass__ = type
-__all__ = ['Command']
-
-from subprocess import check_call
-
-from django.core.management.base import (
-    BaseCommand,
-    CommandError,
-    )
-
-
-class Command(BaseCommand):
-    """Stop and delete the local development database cluster."""
-
-    help = "Delete the development database cluster."
-
-    def handle(self, *args, **kwargs):
-        if len(args) != 0:
-            raise CommandError("Too many arguments.")
-        check_call(['utilities/maasdb', 'delete-cluster', 'db'])

=== removed file 'src/maasserver/management/commands/query.py'
--- src/maasserver/management/commands/query.py	2012-04-16 10:00:51 +0000
+++ src/maasserver/management/commands/query.py	1970-01-01 00:00:00 +0000
@@ -1,41 +0,0 @@
-# Copyright 2012 Canonical Ltd.  This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""Django command: access the development database directly in SQL."""
-
-from __future__ import (
-    absolute_import,
-    print_function,
-    unicode_literals,
-    )
-
-__metaclass__ = type
-__all__ = ['Command']
-
-from subprocess import check_call
-
-from django.core.management.base import (
-    BaseCommand,
-    CommandError,
-    )
-
-
-class Command(BaseCommand):
-    """Custom django command: access the local development database directly.
-
-    Executes an SQL statement given on the command line, or opens an SQL
-    shell if no statement was given.
-    """
-
-    args = "[SQL statement]"
-    help = "Access the database directly in SQL."
-
-    def handle(self, *args, **kwargs):
-        if len(args) > 1:
-            raise CommandError("Too many arguments.")
-        elif len(args) == 1:
-            subcommand = 'query'
-        else:
-            subcommand = 'shell'
-        check_call(
-            ['utilities/maasdb', subcommand, 'db'] + list(args))

=== modified file 'src/maastesting/runner.py'
--- src/maastesting/runner.py	2012-04-16 10:00:51 +0000
+++ src/maastesting/runner.py	2012-05-04 15:16:51 +0000
@@ -14,9 +14,9 @@
     "TestRunner",
     ]
 
-from subprocess import check_call
-
+from django.conf import settings
 from django_nose import NoseTestSuiteRunner
+from maastesting.services.database import ClusterFixture
 
 
 class TestRunner(NoseTestSuiteRunner):
@@ -24,5 +24,13 @@
 
     def setup_databases(self, *args, **kwargs):
         """Fire up the db cluster, then punt to original implementation."""
-        check_call(['utilities/maasdb', 'start', './db/', 'disposable'])
+        self.cluster = ClusterFixture("db", preserve=True)
+        self.cluster.setUp()
+        for database in settings.DATABASES.values():
+            if database["HOST"] == self.cluster.datadir:
+                self.cluster.createdb(database["NAME"])
         return super(TestRunner, self).setup_databases(*args, **kwargs)
+
+    def teardown_databases(self, *args, **kwargs):
+        super(TestRunner, self).teardown_databases(*args, **kwargs)
+        self.cluster.cleanUp()

=== added directory 'src/maastesting/services'
=== added file 'src/maastesting/services/__init__.py'
=== added file 'src/maastesting/services/database.py'
--- src/maastesting/services/database.py	1970-01-01 00:00:00 +0000
+++ src/maastesting/services/database.py	2012-05-04 15:16:51 +0000
@@ -0,0 +1,435 @@
+# Copyright 2012 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Manage a PostgreSQL database service."""
+
+from __future__ import (
+    absolute_import,
+    print_function,
+    unicode_literals,
+    )
+
+__metaclass__ = type
+__all__ = [
+    "Cluster",
+    "ClusterFixture",
+    ]
+
+import argparse
+from contextlib import closing
+from errno import (
+    EEXIST,
+    ENOENT,
+    ENOTEMPTY,
+    )
+from itertools import imap
+from os import (
+    devnull,
+    environ,
+    fdopen,
+    getpid,
+    listdir,
+    makedirs,
+    path,
+    rmdir,
+    unlink,
+    )
+import pipes
+from shutil import rmtree
+import signal
+from subprocess import (
+    CalledProcessError,
+    check_call,
+    )
+import sys
+from time import sleep
+
+from fixtures import Fixture
+import psycopg2
+
+
+PG_VERSION = "9.1"
+PG_BIN = "/usr/lib/postgresql/%s/bin" % PG_VERSION
+
+
+def path_with_pg_bin(exe_path):
+    """Return `exe_path` with `PG_BIN` added."""
+    exe_path = [
+        elem for elem in exe_path.split(path.pathsep)
+        if len(elem) != 0 and not elem.isspace()
+        ]
+    if PG_BIN not in exe_path:
+        exe_path.insert(0, PG_BIN)
+    return path.pathsep.join(exe_path)
+
+
+class Cluster:
+    """Represents a PostgreSQL cluster, running or not."""
+
+    def __init__(self, datadir):
+        self.datadir = path.abspath(datadir)
+
+    def execute(self, *command, **options):
+        """Execute a command with an environment suitable for this cluster."""
+        env = options.pop("env", environ).copy()
+        env["PATH"] = path_with_pg_bin(env.get("PATH", ""))
+        env["PGDATA"] = env["PGHOST"] = self.datadir
+        check_call(command, env=env, **options)
+
+    @property
+    def exists(self):
+        """Whether or not this cluster exists on disk."""
+        version_file = path.join(self.datadir, "PG_VERSION")
+        return path.exists(version_file)
+
+    @property
+    def pidfile(self):
+        """The (expected) pidfile for a running cluster.
+
+        Does *not* guarantee that the pidfile exists.
+        """
+        return path.join(self.datadir, "postmaster.pid")
+
+    @property
+    def logfile(self):
+        """The log file used (or will be used) by this cluster."""
+        return path.join(self.datadir, "backend.log")
+
+    @property
+    def running(self):
+        """Whether this cluster is running or not."""
+        with open(devnull, "rb") as null:
+            try:
+                self.execute("pg_ctl", "status", stdout=null)
+            except CalledProcessError, error:
+                if error.returncode == 1:
+                    return False
+                else:
+                    raise
+            else:
+                return True
+
+    def create(self):
+        """Create this cluster, if it does not exist."""
+        if not self.exists:
+            if not path.isdir(self.datadir):
+                makedirs(self.datadir)
+            self.execute("pg_ctl", "init", "-s", "-o", "-E utf8 -A trust")
+
+    def start(self):
+        """Start this cluster, if it's not already started."""
+        if not self.running:
+            self.create()
+            # pg_ctl options:
+            #  -l <file> -- log file.
+            #  -s -- no informational messages.
+            #  -w -- wait until startup is complete.
+            # postgres options:
+            #  -h <arg> -- host name; empty arg means Unix socket only.
+            #  -F -- don't bother fsync'ing.
+            #  -k -- socket directory.
+            self.execute(
+                "pg_ctl", "start", "-l", self.logfile, "-s", "-w",
+                "-o", "-h '' -F -k %s" % pipes.quote(self.datadir))
+
+    def connect(self, database="template1", autocommit=True):
+        """Connect to this cluster.
+
+        Starts the cluster if necessary.
+        """
+        self.start()
+        connection = psycopg2.connect(
+            database=database, host=self.datadir)
+        connection.autocommit = autocommit
+        return connection
+
+    def shell(self, database="template1"):
+        self.execute("psql", "--quiet", "--", database)
+
+    @property
+    def databases(self):
+        """The names of databases in this cluster."""
+        with closing(self.connect("postgres")) as conn:
+            with closing(conn.cursor()) as cur:
+                cur.execute("SELECT datname FROM pg_catalog.pg_database")
+                return {name for (name,) in cur.fetchall()}
+
+    def createdb(self, name):
+        """Create the named database."""
+        with closing(self.connect()) as conn:
+            with closing(conn.cursor()) as cur:
+                cur.execute("CREATE DATABASE %s" % name)
+
+    def dropdb(self, name):
+        """Drop the named database."""
+        with closing(self.connect()) as conn:
+            with closing(conn.cursor()) as cur:
+                cur.execute("DROP DATABASE %s" % name)
+
+    def stop(self):
+        """Stop this cluster, if started."""
+        if self.running:
+            # pg_ctl options:
+            #  -w -- wait for shutdown to complete.
+            #  -m <mode> -- shutdown mode.
+            self.execute("pg_ctl", "stop", "-s", "-w", "-m", "fast")
+
+    def destroy(self):
+        """Destroy this cluster, if it exists.
+
+        The cluster will be stopped if it's started.
+        """
+        if self.exists:
+            self.stop()
+            rmtree(self.datadir)
+
+
+class ProcessSemaphore:
+    """A sort-of-semaphore where it is considered locked if a directory cannot
+    be removed.
+
+    The locks are taken out one per-process, so this is a way of keeping a
+    reference to a shared resource between processes.
+    """
+
+    def __init__(self, lockdir):
+        super(ProcessSemaphore, self).__init__()
+        self.lockdir = lockdir
+        self.lockfile = path.join(
+            self.lockdir, "%d" % getpid())
+
+    def acquire(self):
+        try:
+            makedirs(self.lockdir)
+        except OSError, error:
+            if error.errno != EEXIST:
+                raise
+        open(self.lockfile, "w").close()
+
+    def release(self):
+        try:
+            unlink(self.lockfile)
+        except OSError, error:
+            if error.errno != ENOENT:
+                raise
+
+    @property
+    def locked(self):
+        try:
+            rmdir(self.lockdir)
+        except OSError, error:
+            if error.errno == ENOTEMPTY:
+                return True
+            elif error.errno == ENOENT:
+                return False
+            else:
+                raise
+        else:
+            return False
+
+    @property
+    def locked_by(self):
+        try:
+            return [
+                int(name) if name.isdigit() else name
+                for name in listdir(self.lockdir)
+                ]
+        except OSError, error:
+            if error.errno == ENOENT:
+                return []
+            else:
+                raise
+
+
+class ClusterFixture(Cluster, Fixture):
+    """A fixture for a `Cluster`."""
+
+    def __init__(self, datadir, preserve=False):
+        """
+        @param preserve: Leave the cluster and its databases behind, even if
+            this fixture creates them.
+        """
+        super(ClusterFixture, self).__init__(datadir)
+        self.preserve = preserve
+        self.lock = ProcessSemaphore(
+            path.join(self.datadir, "locks"))
+
+    def setUp(self):
+        super(ClusterFixture, self).setUp()
+        # Only destroy the cluster if we create it...
+        if not self.exists:
+            # ... unless we've been asked to preserve it.
+            if not self.preserve:
+                self.addCleanup(self.destroy)
+            self.create()
+        self.addCleanup(self.stop)
+        self.start()
+        self.addCleanup(self.lock.release)
+        self.lock.acquire()
+
+    def createdb(self, name):
+        """Create the named database if it does not exist already.
+
+        Arranges to drop the named database during clean-up, unless `preserve`
+        has been specified.
+        """
+        if name not in self.databases:
+            super(ClusterFixture, self).createdb(name)
+            if not self.preserve:
+                self.addCleanup(self.dropdb, name)
+
+    def dropdb(self, name):
+        """Drop the named database if it exists."""
+        if name in self.databases:
+            super(ClusterFixture, self).dropdb(name)
+
+    def stop(self):
+        """Stop the cluster, but only if there are no other users."""
+        if not self.lock.locked:
+            super(ClusterFixture, self).stop()
+
+    def destroy(self):
+        """Destroy the cluster, but only if there are no other users."""
+        if not self.lock.locked:
+            super(ClusterFixture, self).destroy()
+
+
+def setup():
+    # Ensure stdout and stderr are line-bufferred.
+    sys.stdout = fdopen(sys.stdout.fileno(), "ab", 1)
+    sys.stderr = fdopen(sys.stderr.fileno(), "ab", 1)
+    # Run the SIGINT handler on SIGTERM; `svc -d` sends SIGTERM.
+    signal.signal(signal.SIGTERM, signal.default_int_handler)
+
+
+def repr_pid(pid):
+    if isinstance(pid, int) or pid.isdigit():
+        try:
+            with open("/proc/%s/cmdline" % pid, "rb") as fd:
+                cmdline = fd.read().rstrip("\0").split("\0")
+        except IOError:
+            return "%s (*unknown*)" % pid
+        else:
+            return "%s (%s)" % (
+                pid, " ".join(imap(pipes.quote, cmdline)))
+    else:
+        return pipes.quote(pid)
+
+
+def locked_by_description(lock):
+    pids = sorted(lock.locked_by)
+    return "locked by:\n* %s" % (
+        "\n* ".join(imap(repr_pid, pids)))
+
+
+def error(*args, **kwargs):
+    kwargs.setdefault("file", sys.stderr)
+    return print(*args, **kwargs)
+
+
+def action_destroy(cluster):
+    """Destroy a cluster."""
+    action_stop(cluster)
+    cluster.destroy()
+    if cluster.exists:
+        if cluster.lock.locked:
+            message = "%s: cluster is %s" % (
+                cluster.datadir, locked_by_description(cluster.lock))
+        else:
+            message = "%s: cluster could not be removed." % cluster.datadir
+        error(message)
+        raise SystemExit(2)
+
+
+def action_run(cluster):
+    """Create and run a cluster."""
+    with cluster:
+        cluster.createdb("maas")
+        while cluster.running:
+            sleep(5.0)
+
+
+def action_shell(cluster):
+    """Spawn a ``psql`` shell for `maas` in the cluster."""
+    with cluster:
+        cluster.createdb("maas")
+        cluster.shell("maas")
+
+
+def action_status(cluster):
+    """Display a message about the state of the cluster.
+
+    The return code is also set:
+
+    - 0: cluster is running.
+    - 1: cluster exists, but is not running.
+    - 2: cluster does not exist.
+
+    """
+    if cluster.exists:
+        if cluster.running:
+            print("%s: running" % cluster.datadir)
+            raise SystemExit(0)
+        else:
+            print("%s: not running" % cluster.datadir)
+            raise SystemExit(1)
+    else:
+        print("%s: not created" % cluster.datadir)
+        raise SystemExit(2)
+
+
+def action_stop(cluster):
+    """Stop a cluster."""
+    cluster.stop()
+    if cluster.running:
+        if cluster.lock.locked:
+            message = "%s: cluster is %s" % (
+                cluster.datadir, locked_by_description(cluster.lock))
+        else:
+            message = "%s: cluster is still running." % cluster.datadir
+        error(message)
+        raise SystemExit(2)
+
+
+actions = {
+    "destroy": action_destroy,
+    "run": action_run,
+    "shell": action_shell,
+    "status": action_status,
+    "stop": action_stop,
+    }
+
+
+argument_parser = argparse.ArgumentParser(description=__doc__)
+argument_parser.add_argument(
+    "action", choices=sorted(actions), nargs="?", default="shell",
+    help="the action to perform (default: %(default)s)")
+argument_parser.add_argument(
+    "-D", "--datadir", dest="datadir",
+    default="db", help=(
+        "the directory in which to place, or find, the cluster "
+        "(default: %(default)s)"))
+argument_parser.add_argument(
+    "--preserve", dest="preserve", action="store_true",
+    default=False, help=(
+        "preserve the cluster and its databases when exiting, "
+        "even if it was necessary to create and start it "
+        "(default: %(default)s)"))
+
+
+def main(args=None):
+    args = argument_parser.parse_args(args)
+    try:
+        setup()
+        action = actions[args.action]
+        cluster = ClusterFixture(
+            datadir=args.datadir, preserve=args.preserve)
+        action(cluster)
+    except CalledProcessError, error:
+        raise SystemExit(error.returncode)
+    except KeyboardInterrupt:
+        pass
+
+
+if __name__ == "__main__":
+    main()

=== added directory 'src/maastesting/services/tests'
=== added file 'src/maastesting/services/tests/__init__.py'
=== added file 'src/maastesting/services/tests/test_database.py'
--- src/maastesting/services/tests/test_database.py	1970-01-01 00:00:00 +0000
+++ src/maastesting/services/tests/test_database.py	2012-05-04 15:16:51 +0000
@@ -0,0 +1,430 @@
+# Copyright 2012 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for `maastesting.services.database`."""
+
+from __future__ import (
+    absolute_import,
+    print_function,
+    unicode_literals,
+    )
+
+__metaclass__ = type
+__all__ = []
+
+from contextlib import closing
+from os import (
+    getpid,
+    path,
+    )
+from StringIO import StringIO
+from subprocess import CalledProcessError
+import sys
+
+from maastesting.services import database
+from maastesting.services.database import (
+    Cluster,
+    ClusterFixture,
+    path_with_pg_bin,
+    PG_BIN,
+    ProcessSemaphore,
+    repr_pid,
+    )
+from maastesting.testcase import TestCase
+from testtools.matchers import (
+    DirExists,
+    FileExists,
+    Not,
+    StartsWith,
+    )
+
+
+class TestFunctions(TestCase):
+
+    def test_path_with_pg_bin(self):
+        self.assertEqual(PG_BIN, path_with_pg_bin(""))
+        self.assertEqual(
+            PG_BIN + path.pathsep + "/bin:/usr/bin",
+            path_with_pg_bin("/bin:/usr/bin"))
+
+    def test_repr_pid_not_a_number(self):
+        self.assertEqual("alice", repr_pid("alice"))
+        self.assertEqual("'alice and bob'", repr_pid("alice and bob"))
+
+    def test_repr_pid_not_a_process(self):
+        self.assertEqual("0 (*unknown*)", repr_pid(0))
+
+    def test_repr_pid_this_process(self):
+        pid = getpid()
+        self.assertThat(repr_pid(pid), StartsWith("%d (" % pid))
+
+
+class TestProcessSemaphore(TestCase):
+
+    def test_init(self):
+        lockdir = self.make_dir()
+        psem = ProcessSemaphore(lockdir)
+        self.assertEqual(lockdir, psem.lockdir)
+        self.assertEqual(
+            path.join(lockdir, "%s" % getpid()),
+            psem.lockfile)
+
+    def test_acquire(self):
+        psem = ProcessSemaphore(
+            path.join(self.make_dir(), "locks"))
+        psem.acquire()
+        self.assertThat(psem.lockfile, FileExists())
+        self.assertTrue(psem.locked)
+        self.assertEqual([getpid()], psem.locked_by)
+
+    def test_release(self):
+        psem = ProcessSemaphore(
+            path.join(self.make_dir(), "locks"))
+        psem.acquire()
+        psem.release()
+        self.assertThat(psem.lockfile, Not(FileExists()))
+        self.assertFalse(psem.locked)
+        self.assertEqual([], psem.locked_by)
+
+
+class TestCluster(TestCase):
+
+    make = Cluster
+
+    def test_init(self):
+        # The datadir passed into the Cluster constructor is resolved to an
+        # absolute path.
+        datadir = path.join(self.make_dir(), "locks")
+        cluster = self.make(path.relpath(datadir))
+        self.assertEqual(datadir, cluster.datadir)
+
+    def patch_check_call(self, returncode=0):
+        calls = []
+
+        def check_call(command, **options):
+            calls.append((command, options))
+            if returncode != 0:
+                raise CalledProcessError(returncode, command)
+
+        self.patch(database, "check_call", check_call)
+        return calls
+
+    def test_execute(self):
+        calls = self.patch_check_call()
+        cluster = self.make(self.make_dir())
+        cluster.execute("true")
+        [(command, options)] = calls
+        self.assertEqual(("true",), command)
+        self.assertIn("env", options)
+        env = options["env"]
+        self.assertEqual(cluster.datadir, env.get("PGDATA"))
+        self.assertEqual(cluster.datadir, env.get("PGHOST"))
+        self.assertThat(
+            env.get("PATH", ""),
+            StartsWith(PG_BIN + path.pathsep))
+
+    def test_exists(self):
+        cluster = self.make(self.make_dir())
+        # The PG_VERSION file is used as a marker of existence.
+        version_file = path.join(cluster.datadir, "PG_VERSION")
+        self.assertThat(version_file, Not(FileExists()))
+        self.assertFalse(cluster.exists)
+        open(version_file, "wb").close()
+        self.assertTrue(cluster.exists)
+
+    def test_pidfile(self):
+        self.assertEqual(
+            "/some/where/postmaster.pid",
+            self.make("/some/where").pidfile)
+
+    def test_logfile(self):
+        self.assertEqual(
+            "/some/where/backend.log",
+            self.make("/some/where").logfile)
+
+    def test_running(self):
+        calls = self.patch_check_call(returncode=0)
+        cluster = self.make("/some/where")
+        self.assertTrue(cluster.running)
+        [(command, options)] = calls
+        self.assertEqual(("pg_ctl", "status"), command)
+
+    def test_running_not(self):
+        self.patch_check_call(returncode=1)
+        cluster = self.make("/some/where")
+        self.assertFalse(cluster.running)
+
+    def test_running_error(self):
+        self.patch_check_call(returncode=2)
+        cluster = self.make("/some/where")
+        self.assertRaises(
+            CalledProcessError, getattr, cluster, "running")
+
+    def test_create(self):
+        cluster = self.make(self.make_dir())
+        cluster.create()
+        self.assertTrue(cluster.exists)
+        self.assertFalse(cluster.running)
+
+    def test_start_and_stop(self):
+        cluster = self.make(self.make_dir())
+        cluster.create()
+        try:
+            cluster.start()
+            self.assertTrue(cluster.running)
+        finally:
+            cluster.stop()
+            self.assertFalse(cluster.running)
+
+    def test_connect(self):
+        cluster = self.make(self.make_dir())
+        cluster.create()
+        self.addCleanup(cluster.stop)
+        cluster.start()
+        with closing(cluster.connect()) as conn:
+            with closing(conn.cursor()) as cur:
+                cur.execute("SELECT 1")
+                self.assertEqual([(1,)], cur.fetchall())
+
+    def test_databases(self):
+        cluster = self.make(self.make_dir())
+        cluster.create()
+        self.addCleanup(cluster.stop)
+        cluster.start()
+        self.assertEqual(
+            {"postgres", "template0", "template1"},
+            cluster.databases)
+
+    def test_createdb_and_dropdb(self):
+        cluster = self.make(self.make_dir())
+        cluster.create()
+        self.addCleanup(cluster.stop)
+        cluster.start()
+        cluster.createdb("setherial")
+        self.assertEqual(
+            {"postgres", "template0", "template1", "setherial"},
+            cluster.databases)
+        cluster.dropdb("setherial")
+        self.assertEqual(
+            {"postgres", "template0", "template1"},
+            cluster.databases)
+
+    def test_destroy(self):
+        cluster = self.make(self.make_dir())
+        cluster.create()
+        cluster.destroy()
+        self.assertFalse(cluster.exists)
+        self.assertFalse(cluster.running)
+        self.assertThat(cluster.datadir, Not(DirExists()))
+
+
+class TestClusterFixture(TestCluster):
+
+    def make(self, *args, **kwargs):
+        fixture = ClusterFixture(*args, **kwargs)
+        # Run the basic fixture set-up so that clean-ups can be added.
+        super(ClusterFixture, fixture).setUp()
+        return fixture
+
+    def test_init_fixture(self):
+        fixture = self.make("/some/where")
+        self.assertEqual(False, fixture.preserve)
+        self.assertIsInstance(fixture.lock, ProcessSemaphore)
+        self.assertEqual(
+            path.join(fixture.datadir, "locks"),
+            fixture.lock.lockdir)
+
+    def test_createdb_no_preserve(self):
+        fixture = self.make(self.make_dir(), preserve=False)
+        self.addCleanup(fixture.stop)
+        fixture.start()
+        fixture.createdb("danzig")
+        self.assertIn("danzig", fixture.databases)
+        # The database is only created if it does not already exist.
+        fixture.createdb("danzig")
+        # Creating a database arranges for it to be dropped when stopping the
+        # fixture.
+        fixture.cleanUp()
+        self.assertNotIn("danzig", fixture.databases)
+
+    def test_createdb_preserve(self):
+        fixture = self.make(self.make_dir(), preserve=True)
+        self.addCleanup(fixture.stop)
+        fixture.start()
+        fixture.createdb("emperor")
+        self.assertIn("emperor", fixture.databases)
+        # The database is only created if it does not already exist.
+        fixture.createdb("emperor")
+        # Creating a database arranges for it to be dropped when stopping the
+        # fixture.
+        fixture.cleanUp()
+        self.assertIn("emperor", fixture.databases)
+
+    def test_dropdb(self):
+        fixture = self.make(self.make_dir())
+        self.addCleanup(fixture.stop)
+        fixture.start()
+        # The database is only dropped if it exists.
+        fixture.dropdb("diekrupps")
+        fixture.dropdb("diekrupps")
+
+    def test_stop_locked(self):
+        # The cluster is not stopped if a lock is held.
+        fixture = self.make(self.make_dir())
+        self.addCleanup(fixture.stop)
+        fixture.start()
+        fixture.lock.acquire()
+        fixture.stop()
+        self.assertTrue(fixture.running)
+        fixture.lock.release()
+        fixture.stop()
+        self.assertFalse(fixture.running)
+
+    def test_destroy_locked(self):
+        # The cluster is not destroyed if a lock is held.
+        fixture = self.make(self.make_dir())
+        fixture.create()
+        fixture.lock.acquire()
+        fixture.destroy()
+        self.assertTrue(fixture.exists)
+        fixture.lock.release()
+        fixture.destroy()
+        self.assertFalse(fixture.exists)
+
+    def test_use_no_preserve(self):
+        # The cluster is stopped and destroyed when preserve=False.
+        with self.make(self.make_dir(), preserve=False) as fixture:
+            self.assertTrue(fixture.exists)
+            self.assertTrue(fixture.running)
+        self.assertFalse(fixture.exists)
+        self.assertFalse(fixture.running)
+
+    def test_use_no_preserve_cluster_already_exists(self):
+        # The cluster is stopped but *not* destroyed when preserve=False if it
+        # existed before the fixture was put into use.
+        fixture = self.make(self.make_dir(), preserve=False)
+        fixture.create()
+        with fixture:
+            self.assertTrue(fixture.exists)
+            self.assertTrue(fixture.running)
+        self.assertTrue(fixture.exists)
+        self.assertFalse(fixture.running)
+
+    def test_use_preserve(self):
+        # The cluster is not stopped and destroyed when preserve=True.
+        with self.make(self.make_dir(), preserve=True) as fixture:
+            self.assertTrue(fixture.exists)
+            self.assertTrue(fixture.running)
+            fixture.createdb("gallhammer")
+        self.assertTrue(fixture.exists)
+        self.assertFalse(fixture.running)
+        self.addCleanup(fixture.stop)
+        fixture.start()
+        self.assertIn("gallhammer", fixture.databases)
+
+
+class TestActions(TestCase):
+
+    class Finished(Exception):
+        """A marker exception used for breaking out."""
+
+    def test_run(self):
+        cluster = ClusterFixture(self.make_dir())
+        self.addCleanup(cluster.stop)
+
+        # Instead of sleeping, check the cluster is running, then break out.
+        def sleep_patch(time):
+            self.assertTrue(cluster.running)
+            self.assertIn("maas", cluster.databases)
+            raise self.Finished
+
+        self.patch(database, "sleep", sleep_patch)
+        self.assertRaises(self.Finished, database.action_run, cluster)
+
+    def test_shell(self):
+        cluster = ClusterFixture(self.make_dir())
+        self.addCleanup(cluster.stop)
+
+        def shell_patch(database):
+            self.assertEqual("maas", database)
+            raise self.Finished
+
+        self.patch(cluster, "shell", shell_patch)
+        self.assertRaises(self.Finished, database.action_shell, cluster)
+
+    def test_status_running(self):
+        cluster = ClusterFixture(self.make_dir())
+        self.addCleanup(cluster.stop)
+        cluster.start()
+        self.patch(sys, "stdout", StringIO())
+        code = self.assertRaises(
+            SystemExit, database.action_status, cluster).code
+        self.assertEqual(0, code)
+        self.assertEqual(
+            "%s: running\n" % cluster.datadir,
+            sys.stdout.getvalue())
+
+    def test_status_not_running(self):
+        cluster = ClusterFixture(self.make_dir())
+        cluster.create()
+        self.patch(sys, "stdout", StringIO())
+        code = self.assertRaises(
+            SystemExit, database.action_status, cluster).code
+        self.assertEqual(1, code)
+        self.assertEqual(
+            "%s: not running\n" % cluster.datadir,
+            sys.stdout.getvalue())
+
+    def test_status_not_created(self):
+        cluster = ClusterFixture(self.make_dir())
+        self.patch(sys, "stdout", StringIO())
+        code = self.assertRaises(
+            SystemExit, database.action_status, cluster).code
+        self.assertEqual(2, code)
+        self.assertEqual(
+            "%s: not created\n" % cluster.datadir,
+            sys.stdout.getvalue())
+
+    def test_stop(self):
+        cluster = ClusterFixture(self.make_dir())
+        self.addCleanup(cluster.stop)
+        cluster.start()
+        database.action_stop(cluster)
+        self.assertFalse(cluster.running)
+        self.assertTrue(cluster.exists)
+
+    def test_stop_when_locked(self):
+        cluster = ClusterFixture(self.make_dir())
+        self.addCleanup(cluster.stop)
+        cluster.start()
+        self.addCleanup(cluster.lock.release)
+        cluster.lock.acquire()
+        self.patch(sys, "stderr", StringIO())
+        error = self.assertRaises(
+            SystemExit, database.action_stop, cluster)
+        self.assertEqual(2, error.code)
+        self.assertThat(
+            sys.stderr.getvalue(), StartsWith(
+                "%s: cluster is locked by:" % cluster.datadir))
+        self.assertTrue(cluster.running)
+
+    def test_destroy(self):
+        cluster = ClusterFixture(self.make_dir())
+        self.addCleanup(cluster.stop)
+        cluster.start()
+        database.action_destroy(cluster)
+        self.assertFalse(cluster.running)
+        self.assertFalse(cluster.exists)
+
+    def test_destroy_when_locked(self):
+        cluster = ClusterFixture(self.make_dir())
+        cluster.create()
+        cluster.lock.acquire()
+        self.patch(sys, "stderr", StringIO())
+        error = self.assertRaises(
+            SystemExit, database.action_destroy, cluster)
+        self.assertEqual(2, error.code)
+        self.assertThat(
+            sys.stderr.getvalue(), StartsWith(
+                "%s: cluster is locked by:" % cluster.datadir))
+        self.assertTrue(cluster.exists)

=== removed file 'utilities/maasdb'
--- utilities/maasdb	2012-03-15 13:58:32 +0000
+++ utilities/maasdb	1970-01-01 00:00:00 +0000
@@ -1,207 +0,0 @@
-#! /bin/bash -e
-#
-# MAAS database control script.  See main() at the bottom for usage.
-#
-# Most functions take as their first argument a database cluster's data
-# directory.  This is where the database's socket, pidfile, log, and data will
-# reside.
-#
-# Some design choices for this module:
-#
-#  * Everything is PostgreSQL on Ubuntu.
-#  * Each branch gets its own cluster.  Kill & delete when done.
-#  * Databases run under the system user that creates them.  No root required.
-#  * No global configuration apart from a basic PostgreSQL install.
-#  * Connections use Unix sockets.  No TCP port hogging.
-
-POSTGRES_VERSION=9.1
-PGCTL="/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl"
-
-
-# Figure out the full absolute data directory path for a given cluster, even
-# if a relative path is given.
-maasdb_locate() {
-    local DATADIR
-    DATADIR="$1"
-
-    if test -z "$1"
-    then
-        echo "Specify a data directory for the MAAS database cluster." >&2
-        return 1
-    fi
-    if ! echo "$DATADIR" | grep '^/'
-    then
-        echo "`pwd`/$DATADIR"
-    fi
-}
-
-
-# Create a database cluster.
-maasdb_create_cluster() {
-    local DATADIR
-    DATADIR="`maasdb_locate "$1"`"
-
-    if ! test -d "$DATADIR/base"
-    then
-        mkdir -p -- "$DATADIR"
-        $PGCTL init -s -D "$DATADIR" -o '-E utf8 -A trust'
-    fi
-}
-
-
-# Start a database cluster.
-maasdb_start_cluster() {
-    local DATADIR DISPOSABLE EXTRA_POSTGRES_OPTS
-    DATADIR="`maasdb_locate "$1"`"
-    # Pass "disposable" as the second argument if the data in this database
-    # is not important at all and you're willing to cut corners for speed.
-    DISPOSABLE="$2"
-
-    if test "$DISPOSABLE" = "disposable"
-    then
-        #  -F -- don't bother fsync'ing.
-        EXTRA_POSTGRES_OPTS="-F"
-    else
-        EXTRA_POSTGRES_OPTS=""
-    fi
-
-    maasdb_create_cluster "$DATADIR"
-
-    if ! test -f "$DATADIR/postmaster.pid"
-    then
-        # pg_ctl options:
-        #  -D <dir> -- data directory.
-        #  -l <file> -- log file.
-        #  -s -- no informational messages.
-        #  -w -- wait until startup is complete.
-        # postgres options:
-        #  -h <arg> -- host name; empty arg means Unix socket only.
-        #  -k -- socket directory.
-        $PGCTL start \
-            -D "$DATADIR" -l "$DATADIR/backend.log" -s -w \
-            -o "-h '' -k '$DATADIR' $EXTRA_POSTGRES_OPTS"
-    fi
-}
-
-
-# Stop a database cluster.
-maasdb_stop_cluster() {
-    local DATADIR
-    DATADIR="`maasdb_locate "$1"`"
-
-    if test -f "$DATADIR/postmaster.pid"
-    then
-        $PGCTL stop -W -m fast -D "$DATADIR"
-    fi
-}
-
-
-# Initialize a MAAS database.
-maasdb_init_db() {
-    local DATADIR DISPOSABLE MARKER
-    DATADIR="`maasdb_locate "$1"`"
-    # Pass "disposable" as the second argument if the data in this database
-    # is not important at all and you're willing to cut corners for speed.
-    DISPOSABLE="$2"
-
-    maasdb_start_cluster "$DATADIR" "$DISPOSABLE"
-
-    MARKER="$DATADIR/maas-created"
-    if ! test -f "$MARKER"
-    then
-        createdb -h "$DATADIR" maas && touch "$MARKER"
-    fi
-}
-
-
-# Open a psql shell on a MAAS database.
-maasdb_shell() {
-    local DATADIR
-    DATADIR="`maasdb_locate "$1"`"
-
-    maasdb_init_db "$DATADIR"
-    psql -h "$DATADIR" maas
-}
-
-
-# Execute a query on a MAAS database.
-maasdb_query() {
-    local DATADIR QUERY
-    DATADIR="`maasdb_locate "$1"`"
-    QUERY="$2"
-
-    maasdb_init_db "$DATADIR"
-    psql -h "$DATADIR" maas -c "$QUERY"
-}
-
-
-# Delete an entire MAAS database and cluster.  Use only with extreme care!
-maasdb_delete_cluster() {
-    local DATADIR
-    DATADIR="`maasdb_locate "$1"`"
-
-    # Before deleting anything, does this at least look like a MAAS database
-    # cluster?
-    if test -d "$DATADIR/base"
-    then
-        maasdb_stop_cluster "$DATADIR"
-        # Removing the data directory may fail because of a race condition
-        # where db/global is nonempty because of a statistics write.  Rather
-        # than block on shutdown, be optimistic and spin on retries.
-        while ! rm -rf -- "$DATADIR"
-        do
-            # If this fails for a more persistent reason, too bad.  Ctrl-C.
-            echo "Retrying deletion of $DATADIR." >&2
-            sleep 0.01
-        done
-    fi
-}
-
-
-usage() {
-    cat <<EOF >&2
-Usage: maasdb <command> <cluster-path>
-
-Where <command> is one of:
-  start
-  stop
-  query
-  shell
-  delete-cluster
-
-And <cluster-path> is the path to the MAAS development database cluster,
-typically "./db"
-EOF
-}
-
-
-unknown_command() {
-    echo >&2 "** Unknown command: $1 **"
-    echo
-    usage
-    exit 1
-}
-
-
-main() {
-    local COMMAND DATADIR
-    COMMAND="$1"
-    DATADIR="$2"
-    if ! shift 2
-    then
-        usage
-        exit 1
-    fi
-
-    case "$COMMAND" in
-        start) maasdb_init_db "$DATADIR" "$@" ;;
-        stop) maasdb_stop_cluster "$DATADIR" "$@" ;;
-        query) maasdb_query "$DATADIR" "$@" ;;
-        shell) maasdb_shell "$DATADIR" "$@" ;;
-        delete-cluster) maasdb_delete_cluster "$DATADIR" "$@" ;;
-        *) unknown_command "$COMMAND" ;;
-    esac
-}
-
-
-main "$@"

=== modified file 'versions.cfg'
--- versions.cfg	2012-04-24 09:28:18 +0000
+++ versions.cfg	2012-05-04 15:16:51 +0000
@@ -27,7 +27,6 @@
 oops-twisted = 0.0.6
 oops-wsgi = 0.0.9
 pyasn1 = 0.0.11a
-pyinotify = 0.9.2
 pymongo = 2.1.1
 PyYAML = 3.10
 setuptools = 0.6.24


Follow ups