← Back to team overview

launchpad-reviewers team mailing list archive

[Merge] lp:~stub/launchpad/db-deploy into lp:launchpad

 

Stuart Bishop has proposed merging lp:~stub/launchpad/db-deploy into lp:launchpad.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~stub/launchpad/db-deploy/+merge/84231

Record bzr branch details when applying database updates
-- 
https://code.launchpad.net/~stub/launchpad/db-deploy/+merge/84231
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~stub/launchpad/db-deploy into lp:launchpad.
=== modified file 'database/schema/security.cfg'
--- database/schema/security.cfg	2011-11-28 20:07:20 +0000
+++ database/schema/security.cfg	2011-12-02 10:00:37 +0000
@@ -48,6 +48,7 @@
 public.is_team(text)                       = EXECUTE
 public.latestdatabasediskutilization       =
 public.launchpaddatabaserevision           = SELECT
+public.launchpaddatabaseupdatelog        = SELECT
 public.lp_account                          =
 public.lp_openididentifier                 =
 public.lp_person                           =
@@ -1142,7 +1143,6 @@
 public.karma                            = SELECT, INSERT, UPDATE
 public.karmaaction                      = SELECT, INSERT, UPDATE
 public.language                         = SELECT, INSERT, UPDATE
-public.launchpaddatabaserevision        = SELECT, INSERT, UPDATE
 public.libraryfilealias                 = SELECT, INSERT
 public.libraryfilecontent               = SELECT, INSERT
 public.logintoken                       = SELECT, INSERT, UPDATE

=== modified file 'database/schema/upgrade.py'
--- database/schema/upgrade.py	2011-10-10 14:58:41 +0000
+++ database/schema/upgrade.py	2011-12-02 10:00:37 +0000
@@ -19,9 +19,14 @@
 import re
 from tempfile import NamedTemporaryFile
 from textwrap import dedent
+import subprocess
 
 from canonical.launchpad.scripts import db_options, logger_options, logger
-from canonical.database.sqlbase import connect, ISOLATION_LEVEL_AUTOCOMMIT
+from canonical.database.sqlbase import (
+    connect,
+    ISOLATION_LEVEL_AUTOCOMMIT,
+    sqlvalues,
+    )
 from canonical.database.postgresql import fqn
 import replication.helpers
 
@@ -90,13 +95,26 @@
         AND LaunchpadDatabaseRevision.start_time <> prev_end_time;
 
     UPDATE LaunchpadDatabaseRevision
-    SET start_time=_start_time.start_time
+    SET
+        start_time=_start_time.start_time,
+        branch_nick = %s,
+        revno = %s,
+        revid = %s
     FROM _start_time
     WHERE
         LaunchpadDatabaseRevision.start_time
             = transaction_timestamp() AT TIME ZONE 'UTC';
     """)
-
+START_UPDATE_LOG_SQL = dedent("""\
+    INSERT INTO LaunchpadDatabaseUpdateLog (
+        start_time, end_time, branch_nick, revno, revid)
+    VALUES (transaction_timestamp() AT TIME ZONE 'UTC', NULL, %s, %s, %s);
+    """)
+FINISH_UPDATE_LOG_SQL = dedent("""\
+    UPDATE LaunchpadDatabaseUpdateLog
+    SET end_time = statement_timestamp() AT TIME ZONE 'UTC'
+    WHERE start_time = transaction_timestamp() AT TIME ZONE 'UTC';
+    """)
 
 def to_seconds(td):
     """Convert a timedelta to seconds."""
@@ -148,12 +166,31 @@
 
 def apply_patches_normal(con):
     """Update a non replicated database."""
+    # On dev environments, until we create a fresh database baseline, the
+    # LaunchpadDatabaseUpdateLog table does not exist at this point (it
+    # will be created later via a database patch). Don't try to update
+    # LaunchpadDatabaseUpdateLog if it does not exist.
+    cur = con.cursor()
+    cur.execute("""
+        SELECT EXISTS (
+            SELECT TRUE FROM information_schema.tables
+            WHERE
+                table_schema='public'
+                AND table_name='launchpaddatabaseupdatelog')
+            """)
+    updatelog_exists = cur.fetchone()[0]
+
+    # Add a record to LaunchpadDatabaseUpdateLog that we are starting
+    # an update.
+    if updatelog_exists:
+        cur.execute(START_UPDATE_LOG_SQL % sqlvalues(*get_bzr_details()))
+
     # trusted.sql contains all our stored procedures, which may
     # be required for patches to apply correctly so must be run first.
     apply_other(con, 'trusted.sql')
 
     # Prepare to repair patch timestamps if necessary.
-    con.cursor().execute(FIX_PATCH_TIMES_PRE_SQL)
+    cur.execute(FIX_PATCH_TIMES_PRE_SQL)
 
     # Apply the patches
     patches = get_patchlist(con)
@@ -161,11 +198,16 @@
         apply_patch(con, major, minor, patch, patch_file)
 
     # Repair patch timestamps if necessary.
-    con.cursor().execute(FIX_PATCH_TIMES_POST_SQL)
+    cur.execute(
+        FIX_PATCH_TIMES_POST_SQL % sqlvalues(*get_bzr_details()))
 
     # Update comments.
     apply_comments(con)
 
+    # Update the LaunchpadDatabaseUpdateLog record, recording the
+    # completion time.
+    if updatelog_exists:
+        cur.execute(FINISH_UPDATE_LOG_SQL)
 
 def apply_patches_replicated():
     """Update a Slony-I cluster."""
@@ -181,73 +223,53 @@
     log.info("Waiting for cluster to sync, pre-update.")
     replication.helpers.sync(timeout=600)
 
+    # Slonik script we are generating.
     outf = StringIO()
 
     # Start a transaction block.
     print >> outf, "try {"
 
-    sql_to_run = []
-
-    def run_sql(script):
-        if os.path.isabs(script):
-            full_path = script
-        else:
-            full_path = os.path.abspath(os.path.join(SCHEMA_DIR, script))
-        assert os.path.exists(full_path), "%s doesn't exist." % full_path
-        sql_to_run.append(full_path)
-
-    # We are going to generate some temporary files using
-    # NamedTempoararyFile. Store them here so we can control when
-    # they get closed and cleaned up.
-    temporary_files = []
+    # All the SQL we need to run, combined into one file. This minimizes
+    # Slony-I syncs and downtime.
+    combined_sql = NamedTemporaryFile(prefix='dbupdate', suffix='.sql')
+    def add_sql(sql):
+        sql = sql.strip()
+        if sql != '':
+            assert sql.endswith(';'), "SQL fragment not terminated with ';'"
+            print >> combined_sql, sql
+            # Flush or we might lose statements from buffering.
+            combined_sql.flush()
+
+    # Add a LaunchpadDatabaseUpdateLog record that we are starting patch
+    # application.
+    add_sql(START_UPDATE_LOG_SQL % sqlvalues(*get_bzr_details()))
 
     # Apply trusted.sql
-    run_sql('trusted.sql')
-
-    # We are going to generate some temporary files using
-    # NamedTempoararyFile. Store them here so we can control when
-    # they get closed and cleaned up.
-    temporary_files = []
-
-    # Apply DB patches as one big hunk.
-    combined_script = NamedTemporaryFile(prefix='patch', suffix='.sql')
-    temporary_files.append(combined_script)
+    add_sql(open(os.path.join(SCHEMA_DIR, 'trusted.sql'), 'r').read())
 
     # Prepare to repair the start timestamps in
     # LaunchpadDatabaseRevision.
-    print >> combined_script, FIX_PATCH_TIMES_PRE_SQL
+    add_sql(FIX_PATCH_TIMES_PRE_SQL)
 
     patches = get_patchlist(con)
     for (major, minor, patch), patch_file in patches:
-        print >> combined_script, open(patch_file, 'r').read()
+        add_sql(open(patch_file, 'r').read())
 
         # Trigger a failure if the patch neglected to update
         # LaunchpadDatabaseRevision.
-        print >> combined_script, (
+        add_sql(
             "SELECT assert_patch_applied(%d, %d, %d);"
             % (major, minor, patch))
 
     # Fix the start timestamps in LaunchpadDatabaseRevision.
-    print >> combined_script, FIX_PATCH_TIMES_POST_SQL
-
-    combined_script.flush()
-    run_sql(combined_script.name)
-
-    # Now combine all the written SQL (probably trusted.sql and
-    # patch*.sql) into one big file, which we execute with a single
-    # slonik execute_script statement to avoid multiple syncs.
-    single = NamedTemporaryFile(prefix='single', suffix='.sql')
-    for path in sql_to_run:
-        print >> single, open(path, 'r').read()
-        print >> single, ""
-    single.flush()
+    add_sql(FIX_PATCH_TIMES_POST_SQL % sqlvalues(*get_bzr_details()))
 
     print >> outf, dedent("""\
         execute script (
             set id = @lpmain_set, event node = @master_node,
             filename='%s'
             );
-        """ % single.name)
+        """ % combined_sql.name)
 
     # Close transaction block and abort on error.
     print >> outf, dedent("""\
@@ -265,11 +287,6 @@
         raise SystemExit(4)
     log.info("slonik(1) schema upgrade script completed.")
 
-    # Cleanup our temporary files - they applied successfully.
-    for temporary_file in temporary_files:
-        temporary_file.close()
-    del temporary_files
-
     # Wait for replication to sync.
     log.info("Waiting for patches to apply to slaves and cluster to sync.")
     replication.helpers.sync(timeout=0)
@@ -598,6 +615,42 @@
     apply_other(con, 'comments.sql')
 
 
+_bzr_details_cache = None
+
+def get_bzr_details():
+    """Return (branch_nick, revno, revision_id) of this Bazaar branch.
+
+    Returns (None, None, None) if the tree this code is running from
+    is not a Bazaar branch.
+    """
+    global _bzr_details_cache
+    if _bzr_details_cache is None:
+        cmd = [
+            'bzr',
+            'version-info',
+            '--custom',
+            '--template={branch_nick} {revno} {revision_id}',
+            SCHEMA_DIR,
+            ]
+        p = subprocess.Popen(
+            cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        out, err = p.communicate()
+
+        if p.returncode == 0:
+            branch_nick, revno, revision_id = out.split(' ',3)
+            log.debug("branch-nick: %s", branch_nick)
+            log.debug("revno: %s", revno)
+            log.debug("revision-id: %s", revision_id)
+        else:
+            log.error("Failed to retrieve Bazaar branch details")
+            revision_id, revno, branch_nick = None, None, None
+
+        _bzr_details_cache = (branch_nick, revno, revision_id)
+
+    return _bzr_details_cache
+
+
 if __name__ == '__main__':
     parser = OptionParser()
     db_options(parser)