← Back to team overview

launchpad-reviewers team mailing list archive

[Merge] lp:~stub/launchpad/pending-db-changes into lp:launchpad

 

Stuart Bishop has proposed merging lp:~stub/launchpad/pending-db-changes into lp:launchpad.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)
Related bugs:
  Bug #796140 in Launchpad itself: "Disk utilization stats gatherer not ignoring temporary tables"
  https://bugs.launchpad.net/launchpad/+bug/796140

For more details, see:
https://code.launchpad.net/~stub/launchpad/pending-db-changes/+merge/74169

= Summary =

A CREATE INDEX CONCURRENTLY got interrupted on production, and we can't drop this index on the busy live system.

== Proposed fix ==

DB patch to drop the index next downtime update.
-- 
https://code.launchpad.net/~stub/launchpad/pending-db-changes/+merge/74169
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~stub/launchpad/pending-db-changes into lp:launchpad.
=== modified file 'cronscripts/generate-contents-files.py'
--- cronscripts/generate-contents-files.py	2011-07-11 13:33:13 +0000
+++ cronscripts/generate-contents-files.py	2011-09-06 07:51:36 +0000
@@ -7,7 +7,6 @@
 
 import _pythonpath
 
-from canonical.config import config
 from lp.archivepublisher.scripts.generate_contents_files import (
     GenerateContentsFiles,
     )
@@ -15,5 +14,5 @@
 
 if __name__ == '__main__':
     script = GenerateContentsFiles(
-        "generate-contents", dbuser=config.archivepublisher.dbuser)
+        "generate-contents", dbuser='generate_contents_files')
     script.lock_and_run()

=== modified file 'cronscripts/publish-ftpmaster.py'
--- cronscripts/publish-ftpmaster.py	2011-03-31 06:29:09 +0000
+++ cronscripts/publish-ftpmaster.py	2011-09-06 07:51:36 +0000
@@ -7,11 +7,10 @@
 
 import _pythonpath
 
-from canonical.config import config
 from lp.archivepublisher.scripts.publish_ftpmaster import PublishFTPMaster
 
 
 if __name__ == '__main__':
     script = PublishFTPMaster(
-        "publish-ftpmaster", dbuser=config.archivepublisher.dbuser)
+        "publish-ftpmaster", 'publish_ftpmaster')
     script.lock_and_run()

=== modified file 'database/replication/helpers.py'
--- database/replication/helpers.py	2011-07-25 13:39:10 +0000
+++ database/replication/helpers.py	2011-09-06 07:51:36 +0000
@@ -145,7 +145,7 @@
         self.table_id, self.replication_set_id, self.master_node_id = row
 
 
-def sync(timeout):
+def sync(timeout, exit_on_fail=True):
     """Generate a sync event and wait for it to complete on all nodes.
 
     This means that all pending events have propagated and are in sync
@@ -154,8 +154,14 @@
 
     :param timeout: Number of seconds to wait for the sync. 0 to block
                     indefinitely.
+
+    :param exit_on_fail: If True, on failure of the sync
+                         SystemExit is raised using the slonik return code.
+
+    :returns: True if the sync completed successfully. False if
+              exit_on_fail is False and the script failed for any reason.
     """
-    return execute_slonik("", sync=timeout)
+    return execute_slonik("", sync=timeout, exit_on_fail=exit_on_fail)
 
 
 def execute_slonik(script, sync=None, exit_on_fail=True, auto_preamble=True):

=== modified file 'database/replication/slon_ctl.py'
--- database/replication/slon_ctl.py	2010-10-11 10:32:29 +0000
+++ database/replication/slon_ctl.py	2011-09-06 07:51:36 +0000
@@ -104,7 +104,7 @@
         log.debug("Logging to %s" % logfile)
         log.debug("PID file %s" % pidfile)
         # Hard code suitable command line arguments for development.
-        slon_args = "-d 2 -s 2000 -t 10000"
+        slon_args = "-d 2 -s 500 -t 2500"
         if lag is not None:
             slon_args = "%s -l '%s'" % (slon_args, lag)
         cmd = [

=== modified file 'database/sampledata/current-dev.sql'
--- database/sampledata/current-dev.sql	2011-07-13 06:06:53 +0000
+++ database/sampledata/current-dev.sql	2011-09-06 07:51:36 +0000
@@ -1918,22 +1918,22 @@
 
 ALTER TABLE distribution DISABLE TRIGGER ALL;
 
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (1, 'ubuntu', 'Ubuntu Linux', 'Ubuntu is a new approach to Linux Distribution that includes regular releases, and a simplified single-CD installation system.', 'ubuntulinux.org', 17, 'Ubuntu', 'Ubuntu is a new approach to Linux Distribution that includes regular releases, and a simplified single-CD installation system.', 17, NULL, 1, NULL, true, true, NULL, NULL, 3, 59, NULL, NULL, '2006-10-16 18:31:43.415195', NULL, NULL, NULL, NULL, NULL, true, NULL, true, true, NULL, NULL, NULL, NULL, 20, 20, 20, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (2, 'redhat', 'Redhat Advanced Server', 'Red Hat is a commercial distribution of the GNU/Linux Operating System.', 'redhat.com', 1, 'Red Hat', 'Red Hat is a commercial distribution of the GNU/Linux Operating System.', 1, NULL, 1, NULL, false, false, NULL, 8, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.417928', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (3, 'debian', 'Debian GNU/Linux', 'Debian GNU/Linux is
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (1, 'ubuntu', 'Ubuntu Linux', 'Ubuntu is a new approach to Linux Distribution that includes regular releases, and a simplified single-CD installation system.', 'ubuntulinux.org', 17, 'Ubuntu', 'Ubuntu is a new approach to Linux Distribution that includes regular releases, and a simplified single-CD installation system.', 17, NULL, 1, NULL, true, true, NULL, NULL, 3, 59, NULL, NULL, '2006-10-16 18:31:43.415195', NULL, NULL, NULL, NULL, NULL, true, NULL, true, true, NULL, NULL, NULL, NULL, 20, 20, 20, 60, '{package_name}_derivatives@xxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (2, 'redhat', 'Redhat Advanced Server', 'Red Hat is a commercial distribution of the GNU/Linux Operating System.', 'redhat.com', 1, 'Red Hat', 'Red Hat is a commercial distribution of the GNU/Linux Operating System.', 1, NULL, 1, NULL, false, false, NULL, 8, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.417928', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (3, 'debian', 'Debian GNU/Linux', 'Debian GNU/Linux is
 a non commercial distribution of a GNU/Linux Operating System for many
 platforms.', 'debian.org', 1, 'Debian', 'Debian GNU/Linux is
 a non commercial distribution of a GNU/Linux Operating System for many
-platforms.', 1, NULL, 1, NULL, false, false, NULL, NULL, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.418942', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (4, 'gentoo', 'The Gentoo Linux', 'Gentoo is a very
+platforms.', 1, NULL, 1, NULL, false, false, NULL, NULL, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.418942', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (4, 'gentoo', 'The Gentoo Linux', 'Gentoo is a very
 customizeable GNU/Linux Distribution that is designed to let you build every
-single package yourself, with your own preferences.', 'gentoo.org', 1, 'Gentoo', 'Gentoo is a very customizeable GNU/Linux Distribution that is designed to let you build every single package yourself, with your own preferences.', 1, NULL, 1, NULL, true, false, NULL, NULL, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.41974', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (5, 'kubuntu', 'Kubuntu - Free KDE-based Linux', 'Kubuntu is an entirely free Linux distribution that uses the K Desktop
+single package yourself, with your own preferences.', 'gentoo.org', 1, 'Gentoo', 'Gentoo is a very customizeable GNU/Linux Distribution that is designed to let you build every single package yourself, with your own preferences.', 1, NULL, 1, NULL, true, false, NULL, NULL, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.41974', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (5, 'kubuntu', 'Kubuntu - Free KDE-based Linux', 'Kubuntu is an entirely free Linux distribution that uses the K Desktop
 Environment as its default desktop after install.', 'kubuntu.org', 1, 'Kubuntu', 'Kubuntu is an entirely free Linux distribution that uses the K Desktop
-Environment as its default desktop after install.', 1, NULL, 1, NULL, false, false, NULL, 8, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.420551', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (7, 'guadalinex', 'GuadaLinex: Linux for Andalucia', 'GuadaLinex is based on Ubuntu and adds full support for applications specific to the local environment in Andalucia.', 'guadalinex.es', 4, 'GuadaLinex', 'The GuadaLinex team produces a high quality linux for the Andalucian marketplace.', 32, NULL, 1, NULL, false, false, NULL, NULL, NULL, 4, NULL, NULL, '2006-10-16 18:31:43.421329', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (8, 'ubuntutest', 'Ubuntu Test', 'Ubuntu Test', 'ubuntulinux.org', 17, 'ubuntutest', 'Ubuntu Test summary', 17, NULL, 1, NULL, false, false, NULL, NULL, NULL, 17, NULL, NULL, '2006-10-16 18:31:43.422162', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (9, 'deribuntu', 'Deribuntu', 'Deribuntu', 'deribuntu', 16, 'Deribuntu', 'Deribuntu', 16, NULL, 1, NULL, false, false, NULL, NULL, NULL, 16, NULL, NULL, '2011-03-17 14:28:54.354337', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
+Environment as its default desktop after install.', 1, NULL, 1, NULL, false, false, NULL, 8, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.420551', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (7, 'guadalinex', 'GuadaLinex: Linux for Andalucia', 'GuadaLinex is based on Ubuntu and adds full support for applications specific to the local environment in Andalucia.', 'guadalinex.es', 4, 'GuadaLinex', 'The GuadaLinex team produces a high quality linux for the Andalucian marketplace.', 32, NULL, 1, NULL, false, false, NULL, NULL, NULL, 4, NULL, NULL, '2006-10-16 18:31:43.421329', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (8, 'ubuntutest', 'Ubuntu Test', 'Ubuntu Test', 'ubuntulinux.org', 17, 'ubuntutest', 'Ubuntu Test summary', 17, NULL, 1, NULL, false, false, NULL, NULL, NULL, 17, NULL, NULL, '2006-10-16 18:31:43.422162', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (9, 'deribuntu', 'Deribuntu', 'Deribuntu', 'deribuntu', 16, 'Deribuntu', 'Deribuntu', 16, NULL, 1, NULL, false, false, NULL, NULL, NULL, 16, NULL, NULL, '2011-03-17 14:28:54.354337', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
 
 
 ALTER TABLE distribution ENABLE TRIGGER ALL;

=== modified file 'database/sampledata/current.sql'
--- database/sampledata/current.sql	2011-07-13 06:06:53 +0000
+++ database/sampledata/current.sql	2011-09-06 07:51:36 +0000
@@ -1917,21 +1917,21 @@
 
 ALTER TABLE distribution DISABLE TRIGGER ALL;
 
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (1, 'ubuntu', 'Ubuntu Linux', 'Ubuntu is a new approach to Linux Distribution that includes regular releases, and a simplified single-CD installation system.', 'ubuntulinux.org', 17, 'Ubuntu', 'Ubuntu is a new approach to Linux Distribution that includes regular releases, and a simplified single-CD installation system.', 17, NULL, 1, NULL, true, true, NULL, NULL, 3, 59, NULL, NULL, '2006-10-16 18:31:43.415195', NULL, NULL, NULL, NULL, NULL, true, NULL, true, true, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (2, 'redhat', 'Redhat Advanced Server', 'Red Hat is a commercial distribution of the GNU/Linux Operating System.', 'redhat.com', 1, 'Red Hat', 'Red Hat is a commercial distribution of the GNU/Linux Operating System.', 1, NULL, 1, NULL, false, false, NULL, 8, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.417928', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (3, 'debian', 'Debian GNU/Linux', 'Debian GNU/Linux is
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (1, 'ubuntu', 'Ubuntu Linux', 'Ubuntu is a new approach to Linux Distribution that includes regular releases, and a simplified single-CD installation system.', 'ubuntulinux.org', 17, 'Ubuntu', 'Ubuntu is a new approach to Linux Distribution that includes regular releases, and a simplified single-CD installation system.', 17, NULL, 1, NULL, true, true, NULL, NULL, 3, 59, NULL, NULL, '2006-10-16 18:31:43.415195', NULL, NULL, NULL, NULL, NULL, true, NULL, true, true, NULL, NULL, NULL, NULL, 10, 10, 10, 60, '{package_name}_derivatives@xxxxxxxxxxxxxxxxxxxxxx');
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (2, 'redhat', 'Redhat Advanced Server', 'Red Hat is a commercial distribution of the GNU/Linux Operating System.', 'redhat.com', 1, 'Red Hat', 'Red Hat is a commercial distribution of the GNU/Linux Operating System.', 1, NULL, 1, NULL, false, false, NULL, 8, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.417928', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (3, 'debian', 'Debian GNU/Linux', 'Debian GNU/Linux is
 a non commercial distribution of a GNU/Linux Operating System for many
 platforms.', 'debian.org', 1, 'Debian', 'Debian GNU/Linux is
 a non commercial distribution of a GNU/Linux Operating System for many
-platforms.', 1, NULL, 1, NULL, false, false, NULL, NULL, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.418942', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (4, 'gentoo', 'The Gentoo Linux', 'Gentoo is a very
+platforms.', 1, NULL, 1, NULL, false, false, NULL, NULL, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.418942', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (4, 'gentoo', 'The Gentoo Linux', 'Gentoo is a very
 customizeable GNU/Linux Distribution that is designed to let you build every
-single package yourself, with your own preferences.', 'gentoo.org', 1, 'Gentoo', 'Gentoo is a very customizeable GNU/Linux Distribution that is designed to let you build every single package yourself, with your own preferences.', 1, NULL, 1, NULL, true, false, NULL, NULL, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.41974', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (5, 'kubuntu', 'Kubuntu - Free KDE-based Linux', 'Kubuntu is an entirely free Linux distribution that uses the K Desktop
+single package yourself, with your own preferences.', 'gentoo.org', 1, 'Gentoo', 'Gentoo is a very customizeable GNU/Linux Distribution that is designed to let you build every single package yourself, with your own preferences.', 1, NULL, 1, NULL, true, false, NULL, NULL, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.41974', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (5, 'kubuntu', 'Kubuntu - Free KDE-based Linux', 'Kubuntu is an entirely free Linux distribution that uses the K Desktop
 Environment as its default desktop after install.', 'kubuntu.org', 1, 'Kubuntu', 'Kubuntu is an entirely free Linux distribution that uses the K Desktop
-Environment as its default desktop after install.', 1, NULL, 1, NULL, false, false, NULL, 8, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.420551', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (7, 'guadalinex', 'GuadaLinex: Linux for Andalucia', 'GuadaLinex is based on Ubuntu and adds full support for applications specific to the local environment in Andalucia.', 'guadalinex.es', 4, 'GuadaLinex', 'The GuadaLinex team produces a high quality linux for the Andalucian marketplace.', 32, NULL, 1, NULL, false, false, NULL, NULL, NULL, 4, NULL, NULL, '2006-10-16 18:31:43.421329', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
-INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant) VALUES (8, 'ubuntutest', 'Ubuntu Test', 'Ubuntu Test', 'ubuntulinux.org', 17, 'ubuntutest', 'Ubuntu Test summary', 17, NULL, 1, NULL, false, false, NULL, NULL, NULL, 17, NULL, NULL, '2006-10-16 18:31:43.422162', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60);
+Environment as its default desktop after install.', 1, NULL, 1, NULL, false, false, NULL, 8, NULL, 1, NULL, NULL, '2006-10-16 18:31:43.420551', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (7, 'guadalinex', 'GuadaLinex: Linux for Andalucia', 'GuadaLinex is based on Ubuntu and adds full support for applications specific to the local environment in Andalucia.', 'guadalinex.es', 4, 'GuadaLinex', 'The GuadaLinex team produces a high quality linux for the Andalucian marketplace.', 32, NULL, 1, NULL, false, false, NULL, NULL, NULL, 4, NULL, NULL, '2006-10-16 18:31:43.421329', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
+INSERT INTO distribution (id, name, title, description, domainname, owner, displayname, summary, members, translationgroup, translationpermission, bug_supervisor, official_malone, official_rosetta, security_contact, driver, translation_focus, mirror_admin, upload_admin, upload_sender, date_created, homepage_content, icon, mugshot, logo, fti, official_answers, language_pack_admin, official_blueprints, enable_bug_expiration, bug_reporting_guidelines, reviewer_whiteboard, max_bug_heat, bug_reported_acknowledgement, answers_usage, blueprints_usage, translations_usage, registrant, package_derivatives_email) VALUES (8, 'ubuntutest', 'Ubuntu Test', 'Ubuntu Test', 'ubuntulinux.org', 17, 'ubuntutest', 'Ubuntu Test summary', 17, NULL, 1, NULL, false, false, NULL, NULL, NULL, 17, NULL, NULL, '2006-10-16 18:31:43.422162', NULL, NULL, NULL, NULL, NULL, false, NULL, false, false, NULL, NULL, NULL, NULL, 10, 10, 10, 60, NULL);
 
 
 ALTER TABLE distribution ENABLE TRIGGER ALL;

=== modified file 'database/schema/Makefile'
--- database/schema/Makefile	2011-01-31 11:10:39 +0000
+++ database/schema/Makefile	2011-09-06 07:51:36 +0000
@@ -128,7 +128,6 @@
 	@ psql -d ${EMPTY_DBNAME} -q -c "CREATE SCHEMA todrop;"
 	@ echo "* Creating functions"
 	@ psql -d ${EMPTY_DBNAME} -f trusted.sql   | grep : | cat
-	@ psql -d ${EMPTY_DBNAME} -f testfuncs.sql | grep : | cat
 	@ echo "* Installing tsearch2 into ts2 schema"
 	@ ${PYTHON} fti.py -q --setup-only -d ${EMPTY_DBNAME}
 	@ echo "* Loading base database schema"

=== modified file 'database/schema/comments.sql'
--- database/schema/comments.sql	2011-07-08 17:12:15 +0000
+++ database/schema/comments.sql	2011-09-06 07:51:36 +0000
@@ -1129,6 +1129,7 @@
 COMMENT ON COLUMN Distribution.max_bug_heat IS 'The highest heat value across bugs for this distribution.';
 COMMENT ON COLUMN Distribution.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they\'ve reported a new bug.';
 COMMENT ON COLUMN Distribution.registrant IS 'The person in launchpad who registered this distribution.';
+COMMENT ON COLUMN Distribution.package_derivatives_email IS 'The optional email address template to use when sending emails about package updates in a distribution. The string {package_name} in the template will be replaced with the actual package name being updated.';
 
 -- DistroSeries
 
@@ -1212,6 +1213,7 @@
 
 -- BinaryPackagePublishingHistory
 COMMENT ON TABLE BinaryPackagePublishingHistory IS 'PackagePublishingHistory: The history of a BinaryPackagePublishing record. This table represents the lifetime of a publishing record from inception to deletion. Records are never removed from here and in time the publishing table may become a view onto this table. A column being NULL indicates there''s no data for that state transition. E.g. a package which is removed without being superseded won''t have datesuperseded or supersededby filled in.';
+COMMENT ON COLUMN BinaryPackagePublishingHistory.binarypackagename IS 'Reference to a BinaryPackageName.';
 COMMENT ON COLUMN BinaryPackagePublishingHistory.binarypackagerelease IS 'The binarypackage being published.';
 COMMENT ON COLUMN BinaryPackagePublishingHistory.distroarchseries IS 'The distroarchseries into which the binarypackage is being published.';
 COMMENT ON COLUMN BinaryPackagePublishingHistory.status IS 'The current status of the publishing.';
@@ -1676,6 +1678,7 @@
 COMMENT ON COLUMN DistroSeries.driver IS 'This is a person or team who can act as a driver for this specific release - note that the distribution drivers can also set goals for any release.';
 COMMENT ON COLUMN DistroSeries.changeslist IS 'The email address (name name) of the changes announcement list for this distroseries. If NULL, no announcement mail will be sent.';
 COMMENT ON COLUMN DistroSeries.defer_translation_imports IS 'Don''t accept PO imports for this release just now.';
+COMMENT ON COLUMN DistroSeries.include_long_descriptions IS 'Include long descriptions in Packages rather than in Translation-en.';
 
 
 -- DistroArchSeries
@@ -1839,6 +1842,7 @@
 -- SourcePackagePublishingHistory
 
 COMMENT ON TABLE SourcePackagePublishingHistory IS 'SourcePackagePublishingHistory: The history of a SourcePackagePublishing record. This table represents the lifetime of a publishing record from inception to deletion. Records are never removed from here and in time the publishing table may become a view onto this table. A column being NULL indicates there''s no data for that state transition. E.g. a package which is removed without being superseded won''t have datesuperseded or supersededby filled in.';
+COMMENT ON COLUMN SourcePackagePublishingHistory.sourcepackagename IS 'Reference to a SourcePackageName.';
 COMMENT ON COLUMN SourcePackagePublishingHistory.sourcepackagerelease IS 'The sourcepackagerelease being published.';
 COMMENT ON COLUMN SourcePackagePublishingHistory.distroseries IS 'The distroseries into which the sourcepackagerelease is being published.';
 COMMENT ON COLUMN SourcePackagePublishingHistory.status IS 'The current status of the publishing.';
@@ -1900,6 +1904,7 @@
 COMMENT ON COLUMN PackagingJob.productseries IS 'The productseries of the Packaging.';
 COMMENT ON COLUMN PackagingJob.sourcepackagename IS 'The sourcepackage of the Packaging.';
 COMMENT ON COLUMN PackagingJob.distroseries IS 'The distroseries of the Packaging.';
+COMMENT ON COLUMN PackagingJob.potemplate IS 'A POTemplate to restrict the job to or NULL if all templates need to be handled.';
 
 -- Translator / TranslationGroup
 

=== modified file 'database/schema/full-update.py'
--- database/schema/full-update.py	2011-07-26 08:37:52 +0000
+++ database/schema/full-update.py	2011-09-06 07:51:36 +0000
@@ -6,7 +6,7 @@
 
 import _pythonpath
 
-import os.path
+from datetime import datetime
 from optparse import OptionParser
 import subprocess
 import sys
@@ -105,16 +105,17 @@
     # work unattended.
     #
 
+    # Confirm we can invoke PGBOUNCER_INITD
+    log.debug("Confirming sudo access to pgbouncer startup script")
+    pgbouncer_rc = run_pgbouncer(log, 'status')
+    if pgbouncer_rc != 0:
+        return pgbouncer_rc
+
     # We initially ignore open connections, as they will shortly be
     # killed.
     if not NoConnectionCheckPreflight(log).check_all():
         return 99
 
-    # Confirm we can invoke PGBOUNCER_INITD
-    pgbouncer_rc = run_pgbouncer(log, 'status')
-    if pgbouncer_rc != 0:
-        return pgbouncer_rc
-
     #
     # Start the actual upgrade. Failures beyond this point need to
     # generate informative messages to help with recovery.
@@ -125,8 +126,11 @@
     upgrade_run = False
     security_run = False
 
+    outage_start = datetime.now()
+
     try:
         # Shutdown pgbouncer
+        log.info("Outage starts. Shutting down pgbouncer.")
         pgbouncer_rc = run_pgbouncer(log, 'stop')
         if pgbouncer_rc != 0:
             log.fatal("pgbouncer not shut down [%s]", pgbouncer_rc)
@@ -136,10 +140,12 @@
         if not KillConnectionsPreflight(log).check_all():
             return 100
 
+        log.info("Preflight check succeeded. Starting upgrade.")
         upgrade_rc = run_upgrade(options, log)
         if upgrade_rc != 0:
             return upgrade_rc
         upgrade_run = True
+        log.info("Database patches applied. Stored procedures updated.")
 
         security_rc = run_security(options, log)
         if security_rc != 0:
@@ -148,11 +154,13 @@
 
         log.info("All database upgrade steps completed")
 
+        log.info("Restarting pgbouncer")
         pgbouncer_rc = run_pgbouncer(log, 'start')
         if pgbouncer_rc != 0:
             log.fatal("pgbouncer not restarted [%s]", pgbouncer_rc)
             return pgbouncer_rc
         pgbouncer_down = False
+        log.info("Outage complete. %s", datetime.now() - outage_start)
 
         # We will start seeing connections as soon as pgbouncer is
         # reenabled, so ignore them here.
@@ -180,6 +188,7 @@
             pgbouncer_rc = run_pgbouncer(log, 'start')
             if pgbouncer_rc == 0:
                 log.info("Despite failures, pgbouncer restarted.")
+                log.info("Outage complete. %s", datetime.now() - outage_start)
             else:
                 log.fatal("pgbouncer is down and refuses to restart")
         if not upgrade_run:

=== added file 'database/schema/patch-2208-76-3.sql'
--- database/schema/patch-2208-76-3.sql	1970-01-01 00:00:00 +0000
+++ database/schema/patch-2208-76-3.sql	2011-09-06 07:51:36 +0000
@@ -0,0 +1,15 @@
+-- Copyright 2011 Canonical Ltd.  This software is licensed under the
+-- GNU Affero General Public License version 3 (see the file LICENSE).
+
+SET client_min_messages = ERROR;
+
+-- Drop old unused functions still lurking on production.
+DROP FUNCTION IF EXISTS is_blacklisted_name(text);
+DROP FUNCTION IF EXISTS name_blacklist_match(text);
+DROP FUNCTION IF EXISTS reverse(text);
+DROP FUNCTION IF EXISTS bug_summary_temp_journal_clean_row(bugsummary);
+DROP FUNCTION IF EXISTS valid_version(text);
+DROP FUNCTION IF EXISTS decendantrevision(integer);
+DROP FUNCTION IF EXISTS sleep_for_testing(float);
+
+INSERT INTO LaunchpadDatabaseRevision VALUES (2208, 76, 3);

=== added file 'database/schema/patch-2208-76-4.sql'
--- database/schema/patch-2208-76-4.sql	1970-01-01 00:00:00 +0000
+++ database/schema/patch-2208-76-4.sql	2011-09-06 07:51:36 +0000
@@ -0,0 +1,159 @@
+-- Copyright 2011 Canonical Ltd.  This software is licensed under the
+-- GNU Affero General Public License version 3 (see the file LICENSE).
+
+SET client_min_messages=ERROR;
+
+DROP FUNCTION bugsummary_rollup_journal();
+
+CREATE OR REPLACE FUNCTION bugsummary_rollup_journal(batchsize integer=NULL)
+RETURNS VOID
+LANGUAGE plpgsql VOLATILE
+CALLED ON NULL INPUT
+SECURITY DEFINER SET search_path TO public AS
+$$
+DECLARE
+    d bugsummary%ROWTYPE;
+    max_id integer;
+BEGIN
+    -- Lock so we don't contend with other invocations of this
+    -- function. We can happily lock the BugSummary table for writes
+    -- as this function is the only thing that updates that table.
+    -- BugSummaryJournal remains unlocked so nothing should be blocked.
+    LOCK TABLE BugSummary IN ROW EXCLUSIVE MODE;
+
+    IF batchsize IS NULL THEN
+        SELECT MAX(id) INTO max_id FROM BugSummaryJournal;
+    ELSE
+        SELECT MAX(id) INTO max_id FROM (
+            SELECT id FROM BugSummaryJournal ORDER BY id LIMIT batchsize
+            ) AS Whatever;
+    END IF;
+
+    FOR d IN
+        SELECT
+            NULL as id,
+            SUM(count),
+            product,
+            productseries,
+            distribution,
+            distroseries,
+            sourcepackagename,
+            viewed_by,
+            tag,
+            status,
+            milestone,
+            importance,
+            has_patch,
+            fixed_upstream
+        FROM BugSummaryJournal
+        WHERE id <= max_id
+        GROUP BY
+            product, productseries, distribution, distroseries,
+            sourcepackagename, viewed_by, tag, status, milestone,
+            importance, has_patch, fixed_upstream
+        HAVING sum(count) <> 0
+    LOOP
+        IF d.count < 0 THEN
+            PERFORM bug_summary_dec(d);
+        ELSIF d.count > 0 THEN
+            PERFORM bug_summary_inc(d);
+        END IF;
+    END LOOP;
+
+    -- Clean out any counts we reduced to 0.
+    DELETE FROM BugSummary WHERE count=0;
+    -- Clean out the journal entries we have handled.
+    DELETE FROM BugSummaryJournal WHERE id <= max_id;
+END;
+$$;
+
+COMMENT ON FUNCTION bugsummary_rollup_journal(integer) IS
+'Collate and migrate rows from BugSummaryJournal to BugSummary';
+
+
+CREATE OR REPLACE FUNCTION bug_summary_dec(bugsummary) RETURNS VOID
+LANGUAGE SQL AS
+$$
+    -- We own the row reference, so in the absence of bugs this cannot
+    -- fail - just decrement the row.
+    UPDATE BugSummary SET count = count + $1.count
+    WHERE
+        ((product IS NULL AND $1.product IS NULL)
+            OR product = $1.product)
+        AND ((productseries IS NULL AND $1.productseries IS NULL)
+            OR productseries = $1.productseries)
+        AND ((distribution IS NULL AND $1.distribution IS NULL)
+            OR distribution = $1.distribution)
+        AND ((distroseries IS NULL AND $1.distroseries IS NULL)
+            OR distroseries = $1.distroseries)
+        AND ((sourcepackagename IS NULL AND $1.sourcepackagename IS NULL)
+            OR sourcepackagename = $1.sourcepackagename)
+        AND ((viewed_by IS NULL AND $1.viewed_by IS NULL)
+            OR viewed_by = $1.viewed_by)
+        AND ((tag IS NULL AND $1.tag IS NULL)
+            OR tag = $1.tag)
+        AND status = $1.status
+        AND ((milestone IS NULL AND $1.milestone IS NULL)
+            OR milestone = $1.milestone)
+        AND importance = $1.importance
+        AND has_patch = $1.has_patch
+        AND fixed_upstream = $1.fixed_upstream;
+$$;
+
+CREATE OR REPLACE FUNCTION bug_summary_inc(d bugsummary) RETURNS VOID
+LANGUAGE plpgsql AS
+$$
+BEGIN
+    -- Shameless adaptation from the PostgreSQL manual
+    LOOP
+        -- first try to update the row
+        UPDATE BugSummary SET count = count + d.count
+        WHERE
+            ((product IS NULL AND $1.product IS NULL)
+                OR product = $1.product)
+            AND ((productseries IS NULL AND $1.productseries IS NULL)
+                OR productseries = $1.productseries)
+            AND ((distribution IS NULL AND $1.distribution IS NULL)
+                OR distribution = $1.distribution)
+            AND ((distroseries IS NULL AND $1.distroseries IS NULL)
+                OR distroseries = $1.distroseries)
+            AND ((sourcepackagename IS NULL AND $1.sourcepackagename IS NULL)
+                OR sourcepackagename = $1.sourcepackagename)
+            AND ((viewed_by IS NULL AND $1.viewed_by IS NULL)
+                OR viewed_by = $1.viewed_by)
+            AND ((tag IS NULL AND $1.tag IS NULL)
+                OR tag = $1.tag)
+            AND status = $1.status
+            AND ((milestone IS NULL AND $1.milestone IS NULL)
+                OR milestone = $1.milestone)
+            AND importance = $1.importance
+            AND has_patch = $1.has_patch
+            AND fixed_upstream = $1.fixed_upstream;
+        IF found THEN
+            RETURN;
+        END IF;
+        -- not there, so try to insert the key
+        -- if someone else inserts the same key concurrently,
+        -- we could get a unique-key failure
+        BEGIN
+            INSERT INTO BugSummary(
+                count, product, productseries, distribution,
+                distroseries, sourcepackagename, viewed_by, tag,
+                status, milestone,
+                importance, has_patch, fixed_upstream)
+            VALUES (
+                d.count, d.product, d.productseries, d.distribution,
+                d.distroseries, d.sourcepackagename, d.viewed_by, d.tag,
+                d.status, d.milestone,
+                d.importance, d.has_patch, d.fixed_upstream);
+            RETURN;
+        EXCEPTION WHEN unique_violation THEN
+            -- do nothing, and loop to try the UPDATE again
+        END;
+    END LOOP;
+END;
+$$;
+
+
+
+INSERT INTO LaunchpadDatabaseRevision VALUES (2208, 76, 4);

=== added file 'database/schema/patch-2208-78-1.sql'
--- database/schema/patch-2208-78-1.sql	1970-01-01 00:00:00 +0000
+++ database/schema/patch-2208-78-1.sql	2011-09-06 07:51:36 +0000
@@ -0,0 +1,8 @@
+-- Copyright 2011 Canonical Ltd.  This software is licensed under the
+-- GNU Affero General Public License version 3 (see the file LICENSE).
+
+SET client_min_messages=ERROR;
+
+ALTER TABLE BugMessage ALTER COLUMN owner SET NOT NULL;
+
+INSERT INTO LaunchpadDatabaseRevision VALUES (2208, 78, 1);

=== added file 'database/schema/patch-2208-78-2.sql'
--- database/schema/patch-2208-78-2.sql	1970-01-01 00:00:00 +0000
+++ database/schema/patch-2208-78-2.sql	2011-09-06 07:51:36 +0000
@@ -0,0 +1,5 @@
+SET client_min_messages = ERROR;
+
+DROP INDEX IF EXISTS temp_previewdiff;
+
+INSERT INTO LaunchpadDatabaseRevision VALUES (2208, 78, 2);

=== added file 'database/schema/patch-2208-79-0.sql'
--- database/schema/patch-2208-79-0.sql	1970-01-01 00:00:00 +0000
+++ database/schema/patch-2208-79-0.sql	2011-09-06 07:51:36 +0000
@@ -0,0 +1,9 @@
+-- Copyright 2011 Canonical Ltd.  This software is licensed under the
+-- GNU Affero General Public License version 3 (see the file LICENSE).
+
+SET client_min_messages=ERROR;
+
+ALTER TABLE distroseries
+    ADD COLUMN include_long_descriptions BOOLEAN NOT NULL DEFAULT TRUE;
+
+INSERT INTO LaunchpadDatabaseRevision VALUES (2208, 79, 0);

=== added file 'database/schema/patch-2208-79-1.sql'
--- database/schema/patch-2208-79-1.sql	1970-01-01 00:00:00 +0000
+++ database/schema/patch-2208-79-1.sql	2011-09-06 07:51:36 +0000
@@ -0,0 +1,26 @@
+-- Copyright 2011 Canonical Ltd.  This software is licensed under the
+-- GNU Affero General Public License version 3 (see the file LICENSE).
+SET client_min_messages=ERROR;
+
+ALTER TABLE PackagingJob
+  ADD COLUMN
+    potemplate INTEGER DEFAULT NULL
+      CONSTRAINT potemplate_fk REFERENCES POTemplate;
+
+ALTER TABLE PackagingJob
+  ALTER COLUMN productseries DROP NOT NULL,
+  ALTER COLUMN distroseries DROP NOT NULL,
+  ALTER COLUMN sourcepackagename DROP NOT NULL,
+  ADD CONSTRAINT translationtemplatejob_valid_link CHECK (
+    -- If there is a template, it is the template being moved.
+    (potemplate IS NOT NULL AND productseries IS NULL AND
+     distroseries IS NULL AND sourcepackagename IS NULL) OR
+    -- If there is no template, we need all of productseries, distroseries
+    -- and sourcepackagename because we are moving translations between
+    -- a productseries and a source package.
+    (potemplate IS NULL AND productseries IS NOT NULL AND
+     distroseries IS NOT NULL AND sourcepackagename IS NOT NULL));
+
+CREATE INDEX packagingjob__potemplate__idx ON PackagingJob (potemplate);
+
+INSERT INTO LaunchpadDatabaseRevision VALUES (2208, 79, 1);

=== added file 'database/schema/patch-2208-80-1.sql'
--- database/schema/patch-2208-80-1.sql	1970-01-01 00:00:00 +0000
+++ database/schema/patch-2208-80-1.sql	2011-09-06 07:51:36 +0000
@@ -0,0 +1,12 @@
+-- Copyright 2011 Canonical Ltd.  This software is licensed under the
+-- GNU Affero General Public License version 3 (see the file LICENSE).
+
+SET client_min_messages=ERROR;
+
+ALTER TABLE distribution
+    ADD COLUMN package_derivatives_email TEXT;
+UPDATE distribution
+    SET package_derivatives_email = '{package_name}_derivatives@xxxxxxxxxxxxxxxxxxxxxx'
+    WHERE name='ubuntu';
+
+INSERT INTO LaunchpadDatabaseRevision VALUES (2208, 80, 1);

=== added file 'database/schema/patch-2208-81-1.sql'
--- database/schema/patch-2208-81-1.sql	1970-01-01 00:00:00 +0000
+++ database/schema/patch-2208-81-1.sql	2011-09-06 07:51:36 +0000
@@ -0,0 +1,9 @@
+-- Copyright 2011 Canonical Ltd.  This software is licensed under the
+-- GNU Affero General Public License version 3 (see the file LICENSE).
+SET client_min_messages=ERROR;
+
+ALTER TABLE SourcePackagePublishingHistory ADD COLUMN sourcepackagename INTEGER REFERENCES SourcePackageName;
+ALTER TABLE BinaryPackagePublishingHistory ADD COLUMN binarypackagename INTEGER REFERENCES BinaryPackageName;
+
+INSERT INTO LaunchpadDatabaseRevision VALUES (2208, 81, 1);
+

=== modified file 'database/schema/preflight.py'
--- database/schema/preflight.py	2011-07-25 13:59:01 +0000
+++ database/schema/preflight.py	2011-09-06 07:51:36 +0000
@@ -15,7 +15,7 @@
 
 from datetime import timedelta
 from optparse import OptionParser
-import sys
+import time
 
 import psycopg2
 
@@ -226,7 +226,7 @@
         cluster to be quiescent.
         """
         if self.is_replicated:
-            success = replication.helpers.sync(30)
+            success = replication.helpers.sync(30, exit_on_fail=False)
             if success:
                 self.log.info(
                     "Replication events are being propagated.")
@@ -282,21 +282,40 @@
 
         System users are defined by SYSTEM_USERS.
         """
-        for node in self.lpmain_nodes:
-            cur = node.con.cursor()
-            cur.execute("""
-                SELECT
-                    procpid, datname, usename, pg_terminate_backend(procpid)
-                FROM pg_stat_activity
-                WHERE
-                    datname=current_database()
-                    AND procpid <> pg_backend_pid()
-                    AND usename NOT IN %s
-                """ % sqlvalues(SYSTEM_USERS))
-            for procpid, datname, usename, ignored in cur.fetchall():
-                self.log.warning(
-                    "Killed %s [%s] on %s", usename, procpid, datname)
-        return True
+        # We keep trying to terminate connections every 0.5 seconds for
+        # up to 10 seconds.
+        num_tries = 20
+        seconds_to_pause = 0.5
+        for loop_count in range(num_tries):
+            all_clear = True
+            for node in self.lpmain_nodes:
+                cur = node.con.cursor()
+                cur.execute("""
+                    SELECT
+                        procpid, datname, usename,
+                        pg_terminate_backend(procpid)
+                    FROM pg_stat_activity
+                    WHERE
+                        datname=current_database()
+                        AND procpid <> pg_backend_pid()
+                        AND usename NOT IN %s
+                    """ % sqlvalues(SYSTEM_USERS))
+                for procpid, datname, usename, ignored in cur.fetchall():
+                    all_clear = False
+                    if loop_count == num_tries - 1:
+                        self.log.fatal(
+                            "Unable to kill %s [%s] on %s",
+                            usename, procpid, datname)
+                    else:
+                        self.log.warning(
+                            "Killed %s [%s] on %s", usename, procpid, datname)
+            if all_clear:
+                break
+
+            # Wait a little for any terminated connections to actually
+            # terminate.
+            time.sleep(seconds_to_pause)
+        return all_clear
 
 
 def main():
@@ -337,4 +356,4 @@
 
 
 if __name__ == '__main__':
-    sys.exit(main())
+    raise SystemExit(main())

=== modified file 'database/schema/security.cfg'
--- database/schema/security.cfg	2011-08-31 16:12:38 +0000
+++ database/schema/security.cfg	2011-09-06 07:51:36 +0000
@@ -11,7 +11,6 @@
 
 [public]
 type=group
-public._killall_backends(text)             =
 public.activity()                          = EXECUTE
 public.add_test_openid_identifier(integer) = EXECUTE
 public.alllocks                            =
@@ -144,7 +143,7 @@
 public.bugnotificationrecipientarchive  = SELECT, UPDATE
 public.bugsummary                       = SELECT
 public.bugsummaryjournal                = SELECT
-public.bugsummary_rollup_journal()      = EXECUTE
+public.bugsummary_rollup_journal(integer) = EXECUTE
 public.bugtag                           = SELECT, INSERT, DELETE
 public.bugtrackercomponent              = SELECT, INSERT, UPDATE
 public.bugtrackercomponentgroup         = SELECT, INSERT, UPDATE
@@ -2173,7 +2172,7 @@
 public.bugsubscriptionfilterimportance  = SELECT
 public.bugsubscriptionfilterstatus      = SELECT
 public.bugsubscriptionfiltertag         = SELECT
-public.bugsummary_rollup_journal()      = EXECUTE
+public.bugsummary_rollup_journal(integer) = EXECUTE
 public.bugtag                           = SELECT
 public.bugwatch                         = SELECT, UPDATE
 public.bugwatchactivity                 = SELECT, DELETE
@@ -2294,3 +2293,11 @@
 public.potemplate                       = SELECT
 public.sourcepackagename                = SELECT
 type=user
+
+[generate_contents_files]
+type=user
+groups=archivepublisher
+
+[publish_ftpmaster]
+type=user
+groups=archivepublisher

=== modified file 'database/schema/security.py'
--- database/schema/security.py	2011-07-25 14:10:46 +0000
+++ database/schema/security.py	2011-09-06 07:51:36 +0000
@@ -26,22 +26,101 @@
 # The 'read' group does not get given select permission on the following
 # tables. This is to stop the ro user being given access to secrurity
 # sensitive information that interactive sessions don't need.
-SECURE_TABLES = [
+SECURE_TABLES = set((
     'public.accountpassword',
+    'public.accountpassword_id_seq',
     'public.oauthnonce',
+    'public.oauthnonce_id_seq',
     'public.openidnonce',
+    'public.openidnonce_id_seq',
     'public.openidconsumernonce',
-    ]
+    'public.openidconsumernonce_id_seq',
+    ))
+
+POSTGRES_ACL_MAP = {
+    'r': 'SELECT',
+    'w': 'UPDATE',
+    'a': 'INSERT',
+    'd': 'DELETE',
+    'D': 'TRUNCATE',
+    'x': 'REFERENCES',
+    't': 'TRIGGER',
+    'X': 'EXECUTE',
+    'U': 'USAGE',
+    'C': 'CREATE',
+    'c': 'CONNECT',
+    'T': 'TEMPORARY',
+    }
+
+
+def _split_postgres_aclitem(aclitem):
+    """Split a PostgreSQL aclitem textual representation.
+
+    Returns the (grantee, privs, grantor), unquoted and separated.
+    """
+    components = {'grantee': '', 'privs': '', 'grantor': ''}
+    current_component = 'grantee'
+    inside_quoted = False
+    maybe_finished_quoted = False
+    for char in aclitem:
+        if inside_quoted:
+            if maybe_finished_quoted:
+                maybe_finished_quoted = False
+                if char == '"':
+                    components[current_component] += '"'
+                    continue
+                else:
+                    inside_quoted = False
+            elif char == '"':
+                maybe_finished_quoted = True
+                continue
+        # inside_quoted may have just been made False, so no else block
+        # for you.
+        if not inside_quoted:
+            if char == '"':
+                inside_quoted = True
+                continue
+            elif char == '=':
+                current_component = 'privs'
+                continue
+            elif char == '/':
+                current_component = 'grantor'
+                continue
+        components[current_component] += char
+    return components['grantee'], components['privs'], components['grantor']
+
+
+def parse_postgres_acl(acl):
+    """Parse a PostgreSQL object ACL into a dict with permission names.
+
+    The dict is of the form {user: {permission: grant option}}.
+    """
+    parsed = {}
+    if acl is None:
+        return parsed
+    for entry in acl:
+        grantee, privs, grantor = _split_postgres_aclitem(entry)
+        if grantee == '':
+            grantee = 'public'
+        parsed_privs = []
+        for priv in privs:
+            if priv == '*':
+                parsed_privs[-1] = (parsed_privs[-1][0], True)
+                continue
+            parsed_privs.append((POSTGRES_ACL_MAP[priv], False))
+        parsed[grantee] = dict(parsed_privs)
+    return parsed
 
 
 class DbObject(object):
 
     def __init__(
-            self, schema, name, type_, owner, arguments=None, language=None):
+        self, schema, name, type_, owner, acl, arguments=None, language=None):
         self.schema = schema
         self.name = name
         self.type = type_
         self.owner = owner
+        self.acl = acl
         self.arguments = arguments
         self.language = language
 
@@ -80,7 +159,8 @@
                     WHEN 'S' THEN 'sequence'
                     WHEN 's' THEN 'special'
                 END as "Type",
-                u.usename as "Owner"
+                u.usename as "Owner",
+                c.relacl::text[] as "ACL"
             FROM pg_catalog.pg_class c
                 LEFT JOIN pg_catalog.pg_user u ON u.usesysid = c.relowner
                 LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
@@ -89,9 +169,10 @@
                 AND pg_catalog.pg_table_is_visible(c.oid)
             ORDER BY 1,2
             ''')
-        for schema, name, type_, owner in cur.fetchall():
+        for schema, name, type_, owner, acl in cur.fetchall():
             key = '%s.%s' % (schema, name)
-            self[key] = DbObject(schema, name, type_, owner)
+            self[key] = DbObject(
+                schema, name, type_, owner, parse_postgres_acl(acl))
 
         cur.execute(r"""
             SELECT
@@ -99,6 +180,7 @@
                 p.proname as "name",
                 pg_catalog.oidvectortypes(p.proargtypes) as "Argument types",
                 u.usename as "owner",
+                p.proacl::text[] as "acl",
                 l.lanname as "language"
             FROM pg_catalog.pg_proc p
                 LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
@@ -110,9 +192,10 @@
                 AND pg_catalog.pg_function_is_visible(p.oid)
                 AND n.nspname <> 'pg_catalog'
                 """)
-        for schema, name, arguments, owner, language in cur.fetchall():
+        for schema, name, arguments, owner, acl, language in cur.fetchall():
             self['%s.%s(%s)' % (schema, name, arguments)] = DbObject(
-                    schema, name, 'function', owner, arguments, language)
+                    schema, name, 'function', owner, parse_postgres_acl(acl),
+                    arguments, language)
         # Pull a list of groups
         cur.execute("SELECT groname FROM pg_group")
         self.groups = [r[0] for r in cur.fetchall()]
@@ -134,10 +217,10 @@
     def execute(self, cmd, params=None):
         cmd = cmd.encode('utf8')
         if params is None:
-            log.debug2('%s' % (cmd, ))
+            log.debug3('%s' % (cmd, ))
             return self.__dict__['_cursor'].execute(cmd)
         else:
-            log.debug2('%s [%r]' % (cmd, params))
+            log.debug3('%s [%r]' % (cmd, params))
             return self.__dict__['_cursor'].execute(cmd, params)
 
     def __getattr__(self, key):
@@ -206,7 +289,7 @@
         self.entity_keyword = entity_keyword
         self.permissions = defaultdict(dict)
 
-    def add(self, permission, entity, principal, is_group=False):
+    def add(self, permission, entity, principal):
         """Add a permission.
 
         Add all privileges you want to grant or revoke first, then use
@@ -217,14 +300,8 @@
             or revoke a privilege.
         :param principal: User or group to which the privilege should
             apply.
-        :param is_group: Is `principal` a group?
         """
-        if is_group:
-            full_principal = "GROUP " + principal
-        else:
-            full_principal = principal
-        self.permissions[permission].setdefault(entity, set()).add(
-            full_principal)
+        self.permissions[permission].setdefault(principal, set()).add(entity)
 
     def tabulate(self):
         """Group privileges into single-statement work items.
@@ -239,9 +316,9 @@
         """
         result = []
         for permission, parties in self.permissions.iteritems():
-            for entity, principals in parties.iteritems():
+            for principal, entities in parties.iteritems():
                 result.append(
-                    (permission, entity, ", ".join(principals)))
+                    (permission, ", ".join(entities), principal))
         return result
 
     def countPermissions(self):
@@ -250,17 +327,17 @@
 
     def countEntities(self):
         """Count the number of different entities."""
-        return len(set(sum([
-            entities.keys()
-            for entities in self.permissions.itervalues()], [])))
+        entities = set()
+        for entities_and_entities in self.permissions.itervalues():
+            for extra_entities in entities_and_entities.itervalues():
+                entities.update(extra_entities)
+        return len(entities)
 
     def countPrincipals(self):
         """Count the number of different principals."""
-        principals = set()
-        for entities_and_principals in self.permissions.itervalues():
-            for extra_principals in entities_and_principals.itervalues():
-                principals.update(extra_principals)
-        return len(principals)
+        return len(set(sum([
+            principals.keys()
+            for principals in self.permissions.itervalues()], [])))
 
     def grant(self, cur):
         """Grant all gathered permissions.
@@ -303,6 +380,30 @@
         log.debug("Issued %d REVOKE statement(s).", revoke_count)
 
 
+def alter_permissions(cur, which, revoke=False):
+    """Efficiently apply a set of permission changes.
+
+    :param cur: a database cursor
+    :param which: an iterable of (object, role, permissions)
+    :param revoke: whether to revoke or grant permissions
+    """
+    gatherers = {
+        'table': PermissionGatherer("TABLE"),
+        'function': PermissionGatherer("FUNCTION"),
+        'sequence': PermissionGatherer("SEQUENCE"),
+        }
+
+    for obj, role, perms in which:
+        gatherers.get(obj.type, gatherers['table']).add(
+            ', '.join(perms), obj.fullname, quote_identifier(role))
+
+    for gatherer in gatherers.values():
+        if revoke:
+            gatherer.revoke(cur)
+        else:
+            gatherer.grant(cur)
+
+
 def reset_permissions(con, config, options):
     schema = DbSchema(con)
     all_users = list_identifiers(schema.users)
@@ -342,14 +443,14 @@
             if username in schema.principals:
                 if type_ == 'group':
                     if options.revoke:
-                        log.debug("Revoking membership of %s role", username)
+                        log.debug2("Revoking membership of %s role", username)
                         cur.execute("REVOKE %s FROM %s" % (
                             quote_identifier(username), all_users))
                 else:
                     # Note - we don't drop the user because it might own
                     # objects in other databases. We need to ensure they are
                     # not superusers though!
-                    log.debug("Resetting role options of %s role.", username)
+                    log.debug2("Resetting role options of %s role.", username)
                     cur.execute(
                         "ALTER ROLE %s WITH %s" % (
                             quote_identifier(username),
@@ -380,12 +481,12 @@
         if user.endswith('_ro'):
             groups = ['%s_ro' % group for group in groups]
         if groups:
-            log.debug("Adding %s to %s roles", user, ', '.join(groups))
+            log.debug2("Adding %s to %s roles", user, ', '.join(groups))
             for group in groups:
                 cur.execute(r"""ALTER GROUP %s ADD USER %s""" % (
                     quote_identifier(group), quote_identifier(user)))
         else:
-            log.debug("%s not in any roles", user)
+            log.debug2("%s not in any roles", user)
 
     if options.revoke:
         # Change ownership of all objects to OWNER.
@@ -399,40 +500,14 @@
                     log.info("Resetting ownership of %s", obj.fullname)
                     cur.execute("ALTER TABLE %s OWNER TO %s" % (
                         obj.fullname, quote_identifier(options.owner)))
-
-        # Revoke all privs from known groups. Don't revoke anything for
-        # users or groups not defined in our security.cfg.
-        table_revocations = PermissionGatherer("TABLE")
-        function_revocations = PermissionGatherer("FUNCTION")
-        sequence_revocations = PermissionGatherer("SEQUENCE")
-
-        # Gather all revocations.
-        for section_name in config.sections():
-            role = quote_identifier(section_name)
-            if section_name == 'public':
-                ro_role = None
-            else:
-                ro_role = quote_identifier(section_name + "_ro")
-
-            for obj in schema.values():
-                if obj.type == 'function':
-                    gatherer = function_revocations
-                else:
-                    gatherer = table_revocations
-
-                gatherer.add("ALL", obj.fullname, role)
-
-                if obj.seqname in schema:
-                    sequence_revocations.add("ALL", obj.seqname, role)
-                    if ro_role is not None:
-                        sequence_revocations.add("ALL", obj.seqname, ro_role)
-
-        table_revocations.revoke(cur)
-        function_revocations.revoke(cur)
-        sequence_revocations.revoke(cur)
     else:
         log.info("Not resetting ownership of database objects")
-        log.info("Not revoking permissions on database objects")
+
+    managed_roles = set(['read', 'admin'])
+    for section_name in config.sections():
+        managed_roles.add(section_name)
+        if section_name != 'public':
+            managed_roles.add(section_name + "_ro")
 
     # Set of all tables we have granted permissions on. After we have assigned
     # permissions, we can use this to determine what tables have been
@@ -440,16 +515,26 @@
     found = set()
 
     # Set permissions as per config file
-
-    table_permissions = PermissionGatherer("TABLE")
-    function_permissions = PermissionGatherer("FUNCTION")
-    sequence_permissions = PermissionGatherer("SEQUENCE")
+    desired_permissions = defaultdict(lambda: defaultdict(set))
+
+    valid_objs = set(schema.iterkeys())
+
+    # Any object with permissions granted is accessible to the 'read'
+    # role. Some (eg. the lp_* replicated tables and internal or trigger
+    # functions) aren't readable.
+    granted_objs = set()
 
     for username in config.sections():
+        who = username
+        if username == 'public':
+            who_ro = who
+        else:
+            who_ro = '%s_ro' % username
+
         for obj_name, perm in config.items(username):
             if '.' not in obj_name:
                 continue
-            if obj_name not in schema.keys():
+            if obj_name not in valid_objs:
                 log.warn('Bad object name %r', obj_name)
                 continue
             obj = schema[obj_name]
@@ -461,46 +546,37 @@
                 # No perm means no rights. We can't grant no rights, so skip.
                 continue
 
-            who = quote_identifier(username)
-            if username == 'public':
-                who_ro = who
-            else:
-                who_ro = quote_identifier('%s_ro' % username)
+            granted_objs.add(obj)
 
-            log.debug(
-                "Granting %s on %s to %s", perm, obj.fullname, who)
             if obj.type == 'function':
-                function_permissions.add(perm, obj.fullname, who)
-                function_permissions.add("EXECUTE", obj.fullname, who_ro)
-                function_permissions.add(
-                    "EXECUTE", obj.fullname, "read", is_group=True)
-                function_permissions.add(
-                    "ALL", obj.fullname, "admin", is_group=True)
+                desired_permissions[obj][who].update(perm.split(', '))
+                if who_ro:
+                    desired_permissions[obj][who_ro].add("EXECUTE")
             else:
-                table_permissions.add(
-                    "ALL", obj.fullname, "admin", is_group=True)
-                table_permissions.add(perm, obj.fullname, who)
-                table_permissions.add("SELECT", obj.fullname, who_ro)
-                is_secure = (obj.fullname in SECURE_TABLES)
-                if not is_secure:
-                    table_permissions.add(
-                        "SELECT", obj.fullname, "read", is_group=True)
-                if obj.seqname in schema:
+                desired_permissions[obj][who].update(perm.split(', '))
+                if who_ro:
+                    desired_permissions[obj][who_ro].add("SELECT")
+                if obj.seqname in valid_objs:
+                    seq = schema[obj.seqname]
+                    granted_objs.add(seq)
                     if 'INSERT' in perm:
                         seqperm = 'USAGE'
                     elif 'SELECT' in perm:
                         seqperm = 'SELECT'
-                    sequence_permissions.add(seqperm, obj.seqname, who)
-                    if not is_secure:
-                        sequence_permissions.add(
-                            "SELECT", obj.seqname, "read", is_group=True)
-                    sequence_permissions.add("SELECT", obj.seqname, who_ro)
-                    sequence_permissions.add(
-                        "ALL", obj.seqname, "admin", is_group=True)
+                    else:
+                        seqperm = None
+                    if seqperm:
+                        desired_permissions[seq][who].add(seqperm)
+                    desired_permissions[seq][who_ro].add("SELECT")
 
-    function_permissions.grant(cur)
-    table_permissions.grant(cur)
-    sequence_permissions.grant(cur)
+    # read gets read access to all non-secure objects that we've granted
+    # anybody access to.
+    for obj in granted_objs:
+        if obj.type == 'function':
+            desired_permissions[obj]['read'].add("EXECUTE")
+        else:
+            if obj.fullname not in SECURE_TABLES:
+                desired_permissions[obj]['read'].add("SELECT")
 
     # Set permissions on public schemas
     public_schemas = [
@@ -516,10 +592,56 @@
             continue
         found.add(obj)
         if obj.type == 'function':
-            cur.execute('GRANT EXECUTE ON FUNCTION %s TO PUBLIC' %
-                        obj.fullname)
-        else:
-            cur.execute('GRANT SELECT ON TABLE %s TO PUBLIC' % obj.fullname)
+            desired_permissions[obj]['public'].add('EXECUTE')
+        else:
+            desired_permissions[obj]['public'].add('SELECT')
+
+    # For every object in the DB, ensure that the privileges held by our
+    # managed roles match our expectations. If not, store the delta
+    # to be applied later.
+    # Also grants/revokes access by the admin role, which isn't a
+    # traditionally managed role.
+    unmanaged_roles = set()
+    required_grants = []
+    required_revokes = []
+    for obj in schema.values():
+        # We only care about roles that are in either the desired or
+        # existing ACL, and are also our managed roles. But skip admin,
+        # because it's done at the end.
+        interesting_roles = set(desired_permissions[obj]).union(obj.acl)
+        unmanaged_roles.update(interesting_roles.difference(managed_roles))
+        for role in managed_roles.intersection(interesting_roles):
+            if role == 'admin':
+                continue
+            new = desired_permissions[obj][role]
+            old_privs = obj.acl.get(role, {})
+            old = set(old_privs)
+            if any(old_privs.itervalues()):
+                log.warning("%s has grant option on %s", role, obj.fullname)
+            if new == old:
+                continue
+            missing = new.difference(old)
+            extra = old.difference(new)
+            if missing:
+                required_grants.append((obj, role, missing))
+            if extra:
+                required_revokes.append((obj, role, extra))
+
+        # admin get all privileges on anything with privileges granted
+        # in security.cfg. We don't have a mapping from ALL to real
+        # privileges for each object type, so we just grant or revoke ALL
+        # each time.
+        if obj in granted_objs:
+            required_grants.append((obj, "admin", ("ALL",)))
+        else:
+            if "admin" in obj.acl:
+                required_revokes.append((obj, "admin", ("ALL",)))
+
+    log.debug("Unmanaged roles on managed objects: %r", list(unmanaged_roles))
+
+    alter_permissions(cur, required_grants)
+    if options.revoke:
+        alter_permissions(cur, required_revokes, revoke=True)
 
     # Raise an error if we have database objects lying around that have not
     # had permissions assigned.

=== removed file 'database/schema/testfuncs.sql'
--- database/schema/testfuncs.sql	2009-06-24 21:17:33 +0000
+++ database/schema/testfuncs.sql	1970-01-01 00:00:00 +0000
@@ -1,29 +0,0 @@
-/*
-Copyright 2009 Canonical Ltd.  This software is licensed under the
-GNU Affero General Public License version 3 (see the file LICENSE).
-
-Stored procedures designed for use only by the test suite. These
-will not be loaded onto the production database
-*/
-
-CREATE OR REPLACE FUNCTION _killall_backends(text)
-RETURNS Boolean AS $$
-    import os
-    from signal import SIGTERM
-
-    plan = plpy.prepare(
-        "SELECT procpid FROM pg_stat_activity WHERE datname=$1", ['text']
-        )
-    success = True
-    for row in plpy.execute(plan, args):
-        try:
-            plpy.info("Killing %d" % row['procpid'])
-            os.kill(row['procpid'], SIGTERM)
-        except OSError:
-            success = False
-
-    return success
-$$ LANGUAGE plpythonu;
-
-COMMENT ON FUNCTION _killall_backends(text) IS 'Kill all backend processes connected to the given database. Note that this is unlikely to work if you are connected to the database you are killing, as you are likely to kill your own connection before all the others have been killed.';
-

=== modified file 'database/schema/trusted.sql'
--- database/schema/trusted.sql	2011-06-13 08:39:28 +0000
+++ database/schema/trusted.sql	2011-09-06 07:51:36 +0000
@@ -1854,11 +1854,11 @@
         AFFECTED_USER = 4
         SUBSCRIBER = 2
 
-
     def get_max_heat_for_bug(bug_id):
         results = plpy.execute("""
             SELECT MAX(
-                GREATEST(Product.max_bug_heat, Distribution.max_bug_heat))
+                GREATEST(Product.max_bug_heat,
+                         DistributionSourcePackage.max_bug_heat))
                     AS max_heat
             FROM BugTask
             LEFT OUTER JOIN ProductSeries ON
@@ -1871,6 +1871,9 @@
             LEFT OUTER JOIN Distribution ON (
                 BugTask.distribution = Distribution.id
                 OR DistroSeries.distribution = Distribution.id)
+            LEFT OUTER JOIN DistributionSourcePackage ON (
+                BugTask.sourcepackagename =
+                    DistributionSourcePackage.sourcepackagename)
             WHERE
                 BugTask.bug = %s""" % bug_id)
 

=== modified file 'database/schema/upgrade.py'
--- database/schema/upgrade.py	2011-09-02 23:17:37 +0000
+++ database/schema/upgrade.py	2011-09-06 07:51:36 +0000
@@ -178,7 +178,7 @@
     # The first script applies the DB patches to all nodes.
 
     # First make sure the cluster is synced.
-    log.info("Waiting for cluster to sync.")
+    log.info("Waiting for cluster to sync, pre-update.")
     replication.helpers.sync(timeout=600)
 
     outf = StringIO()
@@ -186,18 +186,15 @@
     # Start a transaction block.
     print >> outf, "try {"
 
+    sql_to_run = []
+
     def run_sql(script):
         if os.path.isabs(script):
             full_path = script
         else:
             full_path = os.path.abspath(os.path.join(SCHEMA_DIR, script))
         assert os.path.exists(full_path), "%s doesn't exist." % full_path
-        print >> outf, dedent("""\
-            execute script (
-                set id = @lpmain_set, event node = @master_node,
-                filename='%s'
-                );
-            """ % full_path)
+        sql_to_run.append(full_path)
 
     # We are going to generate some temporary files using
     # NamedTemporaryFile. Store them here so we can control when
@@ -236,6 +233,22 @@
     combined_script.flush()
     run_sql(combined_script.name)
 
+    # Now combine all the written SQL (probably trusted.sql and
+    # patch*.sql) into one big file, which we execute with a single
+    # slonik execute_script statement to avoid multiple syncs.
+    single = NamedTemporaryFile(prefix='single', suffix='.sql')
+    for path in sql_to_run:
+        print >> single, open(path, 'r').read()
+        print >> single, ""
+    single.flush()
+
+    print >> outf, dedent("""\
+        execute script (
+            set id = @lpmain_set, event node = @master_node,
+            filename='%s'
+            );
+        """ % single.name)
+
     # Close transaction block and abort on error.
     print >> outf, dedent("""\
         }
@@ -246,9 +259,11 @@
         """)
 
     # Execute the script with slonik.
+    log.info("slonik(1) schema upgrade script generated. Invoking.")
     if not replication.helpers.execute_slonik(outf.getvalue()):
         log.fatal("Aborting.")
         raise SystemExit(4)
+    log.info("slonik(1) schema upgrade script completed.")
 
     # Cleanup our temporary files - they applied successfully.
     for temporary_file in temporary_files:
@@ -256,6 +271,7 @@
     del temporary_files
 
     # Wait for replication to sync.
+    log.info("Waiting for patches to apply to slaves and cluster to sync.")
     replication.helpers.sync(timeout=0)
 
     # The db patches have now been applied to all nodes, and we are now
@@ -350,25 +366,33 @@
                 subscribe set (
                     id=@holding_set, provider=@master_node,
                     receiver=@node%d_node, forward=yes);
+                wait for event (
+                    origin=@master_node, confirmed=all,
+                    wait on=@master_node, timeout=0);
                 echo 'Waiting for sync';
                 sync (id=@master_node);
                 wait for event (
                     origin=@master_node, confirmed=ALL,
-                    wait on=@master_node, timeout=0
-                    );
+                    wait on=@master_node, timeout=0);
                 """ % (slave_node.node_id, slave_node.node_id))
 
         print >> outf, dedent("""\
             echo 'Merging holding set to lpmain';
             merge set (
-                id=@lpmain_set, add id=@holding_set, origin=@master_node
-                );
+                id=@lpmain_set, add id=@holding_set, origin=@master_node);
             """)
 
         # Execute the script and sync.
+        log.info(
+            "Generated slonik(1) script to replicate new objects. Invoking.")
         if not replication.helpers.execute_slonik(outf.getvalue()):
             log.fatal("Aborting.")
+        log.info(
+            "slonik(1) script to replicate new objects completed.")
+        log.info("Waiting for sync.")
         replication.helpers.sync(timeout=0)
+    else:
+        log.info("No new tables or sequences to replicate.")
 
     # We also scan for tables and sequences we want to drop and do so using
     # a final slonik script. Instead of dropping tables in the DB patch,
@@ -411,8 +435,10 @@
                 exit 1;
                 }
             """ % sql.name)
+        log.info("Generated slonik(1) script to drop tables. Invoking.")
         if not replication.helpers.execute_slonik(sk.getvalue()):
             log.fatal("Aborting.")
+        log.info("slonik(1) script to drop tables completed.")
         sql.close()
 
     # Now drop sequences. We don't do this at the same time as the tables,
@@ -455,8 +481,11 @@
                 exit 1;
                 }
             """ % sql.name)
+        log.info("Generated slonik(1) script to drop sequences. Invoking.")
         if not replication.helpers.execute_slonik(sk.getvalue()):
             log.fatal("Aborting.")
+        log.info("slonik(1) script to drop sequences completed.")
+    log.info("Waiting for final sync.")
     replication.helpers.sync(timeout=0)
 
 

=== modified file 'lib/lp/registry/browser/distribution.py'
--- lib/lp/registry/browser/distribution.py	2011-06-16 13:50:58 +0000
+++ lib/lp/registry/browser/distribution.py	2011-09-06 07:51:36 +0000
@@ -827,6 +827,7 @@
         'description',
         'bug_reporting_guidelines',
         'bug_reported_acknowledgement',
+        'package_derivatives_email',
         'icon',
         'logo',
         'mugshot',

=== modified file 'lib/lp/registry/browser/tests/distribution-views.txt'
--- lib/lp/registry/browser/tests/distribution-views.txt	2011-04-08 13:04:24 +0000
+++ lib/lp/registry/browser/tests/distribution-views.txt	2011-09-06 07:51:36 +0000
@@ -127,7 +127,8 @@
 
     >>> view.field_names
     ['displayname', 'title', 'summary', 'description',
-     'bug_reporting_guidelines', 'bug_reported_acknowledgement', 'icon',
+     'bug_reporting_guidelines', 'bug_reported_acknowledgement',
+     'package_derivatives_email', 'icon',
      'logo', 'mugshot', 'official_malone', 'enable_bug_expiration',
      'blueprints_usage', 'official_rosetta', 'answers_usage',
      'translation_focus']

=== modified file 'lib/lp/registry/browser/tests/test_distribution_views.py'
--- lib/lp/registry/browser/tests/test_distribution_views.py	2011-03-16 17:14:26 +0000
+++ lib/lp/registry/browser/tests/test_distribution_views.py	2011-09-06 07:51:36 +0000
@@ -14,6 +14,7 @@
 from lp.registry.interfaces.distribution import IDistributionSet
 from lp.testing import (
     login_celebrity,
+    person_logged_in,
     TestCaseWithFactory,
     )
 from lp.testing.sampledata import LAUNCHPAD_ADMIN
@@ -126,6 +127,26 @@
         self.assertEqual(distribution.registrant, admin)
 
 
+class TestDistroEditView(TestCaseWithFactory):
+    """Test the +edit page for a distro."""
+
+    layer = DatabaseFunctionalLayer
+
+    def test_package_derivatives_email(self):
+        # Test that the edit form allows changing package_derivatives_email
+        distro = self.factory.makeDistribution()
+        email = '{package_name}_thing@xxxxxxx'
+        form = {
+            'field.package_derivatives_email': email,
+            'field.actions.change': 'Change',
+            }
+        with person_logged_in(distro.owner):
+            create_initialized_view(
+                distro, '+edit', principal=distro.owner, method="POST",
+                form=form)
+        self.assertEqual(distro.package_derivatives_email, email)
+
+
 class TestDistroReassignView(TestCaseWithFactory):
     """Test the +reassign page for a new distribution."""
 

=== modified file 'lib/lp/registry/configure.zcml'
--- lib/lp/registry/configure.zcml	2011-08-25 04:27:35 +0000
+++ lib/lp/registry/configure.zcml	2011-09-06 07:51:36 +0000
@@ -1541,6 +1541,7 @@
                 official_blueprints
                 official_malone
                 owner
+                package_derivatives_email
                 security_contact
                 summary
                 title"/>

=== modified file 'lib/lp/registry/interfaces/distribution.py'
--- lib/lp/registry/interfaces/distribution.py	2011-08-25 08:19:28 +0000
+++ lib/lp/registry/interfaces/distribution.py	2011-09-06 07:51:36 +0000
@@ -282,6 +282,13 @@
     uploaders = Attribute(_(
         "ArchivePermission records for uploaders with rights to upload to "
         "this distribution."))
+    package_derivatives_email = TextLine(
+        title=_("Package Derivatives Email Address"),
+        description=_(
+            "The email address to send information about updates to packages "
+            "that are derived from another distribution. The sequence "
+            "{package_name} is replaced with the actual package name."),
+        required=False)
 
     # properties
     currentseries = exported(

=== modified file 'lib/lp/registry/model/distribution.py'
--- lib/lp/registry/model/distribution.py	2011-08-25 08:29:37 +0000
+++ lib/lp/registry/model/distribution.py	2011-09-06 07:51:36 +0000
@@ -261,6 +261,7 @@
         schema=TranslationPermission, default=TranslationPermission.OPEN)
     active = True
     max_bug_heat = Int()
+    package_derivatives_email = StringCol(notNull=False, default=None)
 
     def __repr__(self):
         displayname = self.displayname.encode('ASCII', 'backslashreplace')

=== modified file 'lib/lp/registry/tests/test_distribution.py'
--- lib/lp/registry/tests/test_distribution.py	2011-08-24 04:07:31 +0000
+++ lib/lp/registry/tests/test_distribution.py	2011-09-06 07:51:36 +0000
@@ -7,12 +7,14 @@
 
 from lazr.lifecycle.snapshot import Snapshot
 import soupmatchers
+from storm.store import Store
 from testtools import ExpectedException
 from testtools.matchers import (
     MatchesAny,
     Not,
     )
 from zope.component import getUtility
+from zope.security.interfaces import Unauthorized
 from zope.security.proxy import removeSecurityProxy
 
 from canonical.database.constants import UTC_NOW
@@ -40,6 +42,7 @@
     )
 from lp.testing import (
     login_person,
+    person_logged_in,
     TestCaseWithFactory,
     )
 from lp.testing.matchers import Provides
@@ -191,6 +194,23 @@
             sourcepackage.distribution.guessPublishedSourcePackageName(
                 'my-package').name)
 
+    def test_derivatives_email(self):
+        # Make sure the package_derivatives_email column stores data
+        # correctly.
+        email = "thingy@xxxxxxx"
+        distro = self.factory.makeDistribution()
+        with person_logged_in(distro.owner):
+            distro.package_derivatives_email = email
+        Store.of(distro).flush()
+        self.assertEqual(email, distro.package_derivatives_email)
+
+    def test_derivatives_email_permissions(self):
+        # package_derivatives_email requires lp.edit to set/change.
+        distro = self.factory.makeDistribution()
+        self.assertRaises(
+            Unauthorized,
+            setattr, distro, "package_derivatives_email", "foo")
+
 
 class TestDistributionCurrentSourceReleases(
     TestDistroSeriesCurrentSourceReleases):

=== modified file 'lib/lp/soyuz/adapters/notification.py'
--- lib/lp/soyuz/adapters/notification.py	2011-08-29 11:57:19 +0000
+++ lib/lp/soyuz/adapters/notification.py	2011-09-06 07:51:36 +0000
@@ -255,7 +255,9 @@
         elif bprs:
             name = bprs[0].build.source_package_release.name
         if name:
-            bcc_addr = '%s_derivatives@xxxxxxxxxxxxxxxxxxxxxx' % name
+            email_base = distroseries.distribution.package_derivatives_email
+            if email_base:
+                bcc_addr = email_base.format(package_name=name)
 
         build_and_send_mail(
             'announcement', [str(distroseries.changeslist)], from_addr,

=== modified file 'lib/lp/soyuz/adapters/tests/test_notification.py'
--- lib/lp/soyuz/adapters/tests/test_notification.py	2011-08-30 12:11:41 +0000
+++ lib/lp/soyuz/adapters/tests/test_notification.py	2011-09-06 07:51:36 +0000
@@ -39,7 +39,10 @@
 from lp.soyuz.model.distroseriessourcepackagerelease import (
     DistroSeriesSourcePackageRelease,
     )
-from lp.testing import TestCaseWithFactory
+from lp.testing import (
+    person_logged_in,
+    TestCaseWithFactory,
+    )
 from lp.testing.mail_helpers import pop_notifications
 
 
@@ -84,6 +87,23 @@
             'accepted')
         self.assertEqual(expected_subject, subject)
 
+    def _setup_notification(self, from_person=None, distroseries=None,
+                            spr=None):
+        if spr is None:
+            spr = self.factory.makeSourcePackageRelease()
+        self.factory.makeSourcePackageReleaseFile(sourcepackagerelease=spr)
+        archive = self.factory.makeArchive(purpose=ArchivePurpose.PRIMARY)
+        pocket = PackagePublishingPocket.RELEASE
+        if distroseries is None:
+            distroseries = self.factory.makeDistroSeries()
+        distroseries.changeslist = "blah@xxxxxxxxxxx"
+        blamer = self.factory.makePerson()
+        if from_person is None:
+            from_person = self.factory.makePerson()
+        notify(
+            blamer, spr, [], [], archive, distroseries, pocket,
+            action='accepted', announce_from_person=from_person)
+
     def test_notify_from_person_override(self):
         # notify() takes an optional from_person to override the calculated
         # From: address in announcement emails.
@@ -133,6 +153,22 @@
             "=?utf-8?q?Lo=C3=AFc_Mot=C3=B6rhead?= <loic@xxxxxxxxxxx>",
             notifications[1]["From"])
 
+    def test_notify_bcc_to_derivatives_list(self):
+        # notify() will BCC the announcement email to the address defined in
+        # Distribution.package_derivatives_email if it's defined.
+        email = "{package_name}_thing@xxxxxxx"
+        distroseries = self.factory.makeDistroSeries()
+        with person_logged_in(distroseries.distribution.owner):
+            distroseries.distribution.package_derivatives_email = email
+        spr = self.factory.makeSourcePackageRelease()
+        self._setup_notification(distroseries=distroseries, spr=spr)
+
+        notifications = pop_notifications()
+        self.assertEqual(2, len(notifications))
+        bcc_address = notifications[1]["Bcc"]
+        expected_email = email.format(package_name=spr.sourcepackagename.name)
+        self.assertIn(expected_email, bcc_address)
+
     def test_fetch_information_spr_multiple_changelogs(self):
         # If previous_version is passed the "changelog" entry in the
         # returned dict should contain the changelogs for all SPRs *since*

=== modified file 'lib/lp/testing/pgsql.py'
--- lib/lp/testing/pgsql.py	2011-09-05 15:42:27 +0000
+++ lib/lp/testing/pgsql.py	2011-09-06 07:51:36 +0000
@@ -402,7 +402,11 @@
                 # always having this is a problem.
                 try:
                     cur = con.cursor()
-                    cur.execute('SELECT _killall_backends(%s)', [self.dbname])
+                    cur.execute("""
+                        SELECT pg_terminate_backend(procpid)
+                        FROM pg_stat_activity
+                        WHERE procpid <> pg_backend_pid() AND datname=%s
+                        """, [self.dbname])
                 except psycopg2.DatabaseError:
                     pass
 

=== modified file 'lib/lp/translations/configure.zcml'
--- lib/lp/translations/configure.zcml	2011-08-25 19:48:19 +0000
+++ lib/lp/translations/configure.zcml	2011-09-06 07:51:36 +0000
@@ -154,6 +154,10 @@
         for="lp.registry.interfaces.packaging.IPackaging
              lazr.lifecycle.interfaces.IObjectEvent"
         handler=".model.translationsharingjob.schedule_packaging_job" />
+    <subscriber
+        for="lp.translations.interfaces.potemplate.IPOTemplate
+             lazr.lifecycle.interfaces.IObjectModifiedEvent"
+        handler=".model.translationsharingjob.schedule_potemplate_job" />
     <facet
         facet="translations">
 
@@ -643,6 +647,10 @@
         class="lp.translations.model.translationpackagingjob.TranslationSplitJob">
         <allow interface='lp.services.job.interfaces.job.IRunnableJob'/>
     </class>
+    <class
+        class="lp.translations.model.translationpackagingjob.TranslationTemplateChangeJob">
+        <allow interface='lp.services.job.interfaces.job.IRunnableJob'/>
+    </class>
     <utility
         component="lp.translations.model.translationtemplatesbuildjob.TranslationTemplatesBuildJob"
         provides="lp.buildmaster.interfaces.buildfarmjob.IBuildFarmJob"

=== modified file 'lib/lp/translations/interfaces/translationsharingjob.py'
--- lib/lp/translations/interfaces/translationsharingjob.py	2011-07-30 14:05:25 +0000
+++ lib/lp/translations/interfaces/translationsharingjob.py	2011-09-06 07:51:36 +0000
@@ -19,3 +19,6 @@
 
     sourcepackagename = Attribute(
         _("The sourcepackagename of the Packaging."))
+
+    potemplate = Attribute(
+        _("The POTemplate to pass around as the relevant template."))

=== modified file 'lib/lp/translations/model/translationpackagingjob.py'
--- lib/lp/translations/model/translationpackagingjob.py	2011-07-30 14:18:38 +0000
+++ lib/lp/translations/model/translationpackagingjob.py	2011-09-06 07:51:36 +0000
@@ -10,6 +10,7 @@
 __all__ = [
     'TranslationMergeJob',
     'TranslationSplitJob',
+    'TranslationTemplateChangeJob',
     ]
 
 import logging
@@ -17,6 +18,7 @@
 from lazr.lifecycle.interfaces import (
     IObjectCreatedEvent,
     IObjectDeletedEvent,
+    IObjectModifiedEvent,
     )
 import transaction
 from zope.interface import (
@@ -40,7 +42,10 @@
     TransactionManager,
     TranslationMerger,
     )
-from lp.translations.utilities.translationsplitter import TranslationSplitter
+from lp.translations.utilities.translationsplitter import (
+    TranslationSplitter,
+    TranslationTemplateSplitter,
+    )
 
 
 class TranslationPackagingJob(TranslationSharingJobDerived, BaseRunnableJob):
@@ -117,3 +122,31 @@
             'Splitting %s and %s', self.productseries.displayname,
             self.sourcepackage.displayname)
         TranslationSplitter(self.productseries, self.sourcepackage).split()
+
+
+class TranslationTemplateChangeJob(TranslationPackagingJob):
+    """Job for merging/splitting translations when template is changed."""
+
+    implements(IRunnableJob)
+
+    class_job_type = TranslationSharingJobType.TEMPLATE_CHANGE
+
+    create_on_event = IObjectModifiedEvent
+
+    @classmethod
+    def forPOTemplate(cls, potemplate):
+        """Create a TranslationTemplateChangeJob for a POTemplate.
+
+        :param potemplate: The `POTemplate` to create the job for.
+        :return: A `TranslationTemplateChangeJob`.
+        """
+        return cls.create(potemplate=potemplate)
+
+    def run(self):
+        """See `IRunnableJob`."""
+        logger = logging.getLogger()
+        logger.info("Sanitizing translations for '%s'" % (
+                self.potemplate.displayname))
+        TranslationTemplateSplitter(self.potemplate).split()
+        tm = TransactionManager(transaction.manager, False)
+        TranslationMerger.mergeModifiedTemplates(self.potemplate, tm)

=== modified file 'lib/lp/translations/model/translationsharingjob.py'
--- lib/lp/translations/model/translationsharingjob.py	2011-07-30 14:30:41 +0000
+++ lib/lp/translations/model/translationsharingjob.py	2011-09-06 07:51:36 +0000
@@ -37,6 +37,7 @@
 from lp.translations.interfaces.translationsharingjob import (
     ITranslationSharingJob,
     )
+from lp.translations.model.potemplate import POTemplate
 
 
 class TranslationSharingJobType(DBEnumeratedType):
@@ -54,6 +55,12 @@
         Split translations between productseries and sourcepackage.
         """)
 
+    TEMPLATE_CHANGE = DBItem(2, """
+        Split/merge translations for a single translation template.
+
+        Split translations out of the template's old sharing set and
+        merge them into its new one after it is renamed or moved.
+        """)
+
 
 class TranslationSharingJob(StormBase):
     """Base class for jobs related to a packaging."""
@@ -82,8 +89,12 @@
 
     sourcepackagename = Reference(sourcepackagename_id, SourcePackageName.id)
 
+    potemplate_id = Int('potemplate')
+
+    potemplate = Reference(potemplate_id, POTemplate.id)
+
     def __init__(self, job, job_type, productseries, distroseries,
-                 sourcepackagename):
+                 sourcepackagename, potemplate=None):
         """"Constructor.
 
         :param job: The `Job` to use for storing basic job state.
@@ -96,6 +107,7 @@
         self.distroseries = distroseries
         self.sourcepackagename = sourcepackagename
         self.productseries = productseries
+        self.potemplate = potemplate
 
 
 class RegisteredSubclass(type):
@@ -143,16 +155,18 @@
         self.job = job
 
     @classmethod
-    def create(cls, productseries, distroseries, sourcepackagename):
+    def create(cls, productseries=None, distroseries=None,
+               sourcepackagename=None, potemplate=None):
         """"Create a TranslationPackagingJob backed by TranslationSharingJob.
 
         :param productseries: The ProductSeries side of the Packaging.
         :param distroseries: The distroseries of the Packaging sourcepackage.
         :param sourcepackagename: The name of the Packaging sourcepackage.
+        :param potemplate: POTemplate to restrict to (if any).
         """
         context = TranslationSharingJob(
             Job(), cls.class_job_type, productseries,
-            distroseries, sourcepackagename)
+            distroseries, sourcepackagename, potemplate)
         return cls(context)
 
     @classmethod
@@ -170,6 +184,27 @@
                 job_class.forPackaging(packaging)
 
     @classmethod
+    def schedulePOTemplateJob(cls, potemplate, event):
+        """Event subscriber to create a TranslationSharingJob on events.
+
+        :param potemplate: The `POTemplate` to create
+            a `TranslationSharingJob` for.
+        :param event: The event itself.
+        """
+        if ('name' not in event.edited_fields and
+            'productseries' not in event.edited_fields and
+            'distroseries' not in event.edited_fields and
+            'sourcepackagename' not in event.edited_fields):
+            # Ignore changes to POTemplates that are neither renames,
+            # nor moves to a different package/project.
+            return
+        for event_type, job_classes in cls._event_types.iteritems():
+            if not event_type.providedBy(event):
+                continue
+            for job_class in job_classes:
+                job_class.forPOTemplate(potemplate)
+
+    @classmethod
     def iterReady(cls, extra_clauses):
         """See `IJobSource`.
 
@@ -207,3 +242,4 @@
 
 #make accessible to zcml
 schedule_packaging_job = TranslationSharingJobDerived.schedulePackagingJob
+schedule_potemplate_job = TranslationSharingJobDerived.schedulePOTemplateJob

=== modified file 'lib/lp/translations/scripts/tests/test_packaging_translations.py'
--- lib/lp/translations/scripts/tests/test_packaging_translations.py	2011-08-12 13:55:02 +0000
+++ lib/lp/translations/scripts/tests/test_packaging_translations.py	2011-09-06 07:51:36 +0000
@@ -39,6 +39,7 @@
             INFO    Deleted POTMsgSets: 1.  TranslationMessages: 1.
             INFO    Running TranslationSplitJob \(ID .*\) in status Waiting
             INFO    Splitting .* and .* in Ubuntu Distroseries.*
+            INFO    1 entries split.
             INFO    Ran 1 TranslationMergeJob jobs.
             INFO    Ran 1 TranslationSplitJob jobs.
             """))

=== modified file 'lib/lp/translations/tests/test_translationpackagingjob.py'
--- lib/lp/translations/tests/test_translationpackagingjob.py	2011-08-01 14:32:07 +0000
+++ lib/lp/translations/tests/test_translationpackagingjob.py	2011-09-06 07:51:36 +0000
@@ -8,12 +8,17 @@
 
 import transaction
 from zope.component import getUtility
+from zope.event import notify
+
+from lazr.lifecycle.event import ObjectModifiedEvent
+from lazr.lifecycle.snapshot import Snapshot
 
 from canonical.launchpad.webapp.testing import verifyObject
 from canonical.testing.layers import (
     LaunchpadZopelessLayer,
     )
 from lp.registry.interfaces.packaging import IPackagingUtil
+from lp.translations.interfaces.potemplate import IPOTemplate
 from lp.translations.model.translationsharingjob import (
     TranslationSharingJob,
     TranslationSharingJobDerived,
@@ -36,6 +41,7 @@
     TranslationMergeJob,
     TranslationPackagingJob,
     TranslationSplitJob,
+    TranslationTemplateChangeJob,
     )
 from lp.translations.tests.test_translationsplitter import (
     make_shared_potmsgset,
@@ -101,20 +107,32 @@
 
 class JobFinder:
 
-    def __init__(self, productseries, sourcepackage, job_class):
-        self.productseries = productseries
-        self.sourcepackagename = sourcepackage.sourcepackagename
-        self.distroseries = sourcepackage.distroseries
+    def __init__(self, productseries, sourcepackage, job_class,
+                 potemplate=None):
+        if potemplate is None:
+            self.productseries = productseries
+            self.sourcepackagename = sourcepackage.sourcepackagename
+            self.distroseries = sourcepackage.distroseries
+            self.potemplate = None
+        else:
+            self.potemplate = potemplate
         self.job_type = job_class.class_job_type
 
     def find(self):
-        return list(TranslationSharingJobDerived.iterReady([
-            TranslationSharingJob.productseries_id == self.productseries.id,
-            (TranslationSharingJob.sourcepackagename_id ==
-             self.sourcepackagename.id),
-            TranslationSharingJob.distroseries_id == self.distroseries.id,
-            TranslationSharingJob.job_type == self.job_type,
-            ]))
+        if self.potemplate is None:
+            return list(TranslationSharingJobDerived.iterReady([
+              TranslationSharingJob.productseries_id == self.productseries.id,
+              (TranslationSharingJob.sourcepackagename_id ==
+               self.sourcepackagename.id),
+              TranslationSharingJob.distroseries_id == self.distroseries.id,
+              TranslationSharingJob.job_type == self.job_type,
+              ]))
+        else:
+            return list(
+                TranslationSharingJobDerived.iterReady([
+                    TranslationSharingJob.potemplate_id == self.potemplate.id,
+                    TranslationSharingJob.job_type == self.job_type,
+                    ]))
 
 
 class TestTranslationPackagingJob(TestCaseWithFactory):
@@ -273,3 +291,62 @@
                 packaging.distroseries)
         (job,) = finder.find()
         self.assertIsInstance(job, TranslationSplitJob)
+
+
+class TestTranslationTemplateChangeJob(TestCaseWithFactory):
+
+    layer = LaunchpadZopelessLayer
+
+    def test_modifyPOTemplate_makes_job(self):
+        """Modifying a POTemplate should make a TranslationTemplateChangeJob."""
+        potemplate = self.factory.makePOTemplate()
+        finder = JobFinder(
+            None, None, TranslationTemplateChangeJob, potemplate)
+        self.assertEqual([], finder.find())
+        with person_logged_in(potemplate.owner):
+            snapshot = Snapshot(potemplate, providing=IPOTemplate)
+            potemplate.name = self.factory.getUniqueString()
+            notify(ObjectModifiedEvent(potemplate, snapshot, ["name"]))
+
+        (job,) = finder.find()
+        self.assertIsInstance(job, TranslationTemplateChangeJob)
+
+    def test_splits_and_merges(self):
+        """Changing a template makes the translations split and then
+        re-merged in the new target sharing set."""
+        potemplate = self.factory.makePOTemplate(name='template')
+        other_ps = self.factory.makeProductSeries(
+            product=potemplate.productseries.product)
+        old_shared = self.factory.makePOTemplate(name='template',
+                                                 productseries=other_ps)
+        new_shared = self.factory.makePOTemplate(name='renamed',
+                                                 productseries=other_ps)
+
+        # Set up shared POTMsgSets and translations.
+        potmsgset = self.factory.makePOTMsgSet(potemplate, sequence=1)
+        potmsgset.setSequence(old_shared, 1)
+        self.factory.makeCurrentTranslationMessage(potmsgset=potmsgset)
+
+        # This is the identical English message in the new_shared template.
+        target_potmsgset = self.factory.makePOTMsgSet(
+            new_shared, sequence=1, singular=potmsgset.singular_text)
+
+        # Rename the template and confirm that messages are now shared
+        # with new_shared instead of old_shared.
+        potemplate.name = 'renamed'
+        job = TranslationTemplateChangeJob.create(potemplate=potemplate)
+
+        self.becomeDbUser('rosettaadmin')
+        job.run()
+
+        # New POTMsgSet is now different from the old one (it's been split),
+        # but matches the target potmsgset (it's been merged into it).
+        new_potmsgset = potemplate.getPOTMsgSets()[0]
+        self.assertNotEqual(potmsgset, new_potmsgset)
+        self.assertEqual(target_potmsgset, new_potmsgset)
+
+        # Translations have been merged as well.
+        self.assertContentEqual(
+            [tm.translations for tm in potmsgset.getAllTranslationMessages()],
+            [tm.translations
+             for tm in new_potmsgset.getAllTranslationMessages()])

=== modified file 'lib/lp/translations/tests/test_translationsplitter.py'
--- lib/lp/translations/tests/test_translationsplitter.py	2011-02-25 20:23:40 +0000
+++ lib/lp/translations/tests/test_translationsplitter.py	2011-09-06 07:51:36 +0000
@@ -13,6 +13,7 @@
     )
 from lp.translations.utilities.translationsplitter import (
     TranslationSplitter,
+    TranslationTemplateSplitter,
     )
 
 
@@ -77,8 +78,6 @@
         splitter = make_translation_splitter(self.factory)
         upstream_item, ubuntu_item = make_shared_potmsgset(
             self.factory, splitter)
-        ubuntu_template = ubuntu_item.potemplate
-        ubuntu_sequence = ubuntu_item.sequence
         new_potmsgset = splitter.splitPOTMsgSet(ubuntu_item)
         self.assertEqual(new_potmsgset, ubuntu_item.potmsgset)
 
@@ -91,8 +90,7 @@
             potmsgset=upstream_item.potmsgset,
             potemplate=upstream_item.potemplate, diverged=True)
         splitter.splitPOTMsgSet(ubuntu_item)
-        upstream_translation = splitter.migrateTranslations(
-            upstream_item.potmsgset, ubuntu_item)
+        splitter.migrateTranslations(upstream_item.potmsgset, ubuntu_item)
         self.assertEqual(
             upstream_message,
             upstream_item.potmsgset.getAllTranslationMessages().one())
@@ -108,8 +106,7 @@
             potmsgset=ubuntu_item.potmsgset,
             potemplate=ubuntu_item.potemplate, diverged=True)
         splitter.splitPOTMsgSet(ubuntu_item)
-        upstream_translation = splitter.migrateTranslations(
-            upstream_item.potmsgset, ubuntu_item)
+        splitter.migrateTranslations(upstream_item.potmsgset, ubuntu_item)
         self.assertEqual(
             ubuntu_message,
             ubuntu_item.potmsgset.getAllTranslationMessages().one())
@@ -139,7 +136,7 @@
         splitter = make_translation_splitter(self.factory)
         upstream_item, ubuntu_item = make_shared_potmsgset(
             self.factory, splitter)
-        upstream_message = self.factory.makeCurrentTranslationMessage(
+        self.factory.makeCurrentTranslationMessage(
             potmsgset=upstream_item.potmsgset,
             potemplate=upstream_item.potemplate)
         splitter.split()
@@ -153,3 +150,183 @@
             upstream_item.potmsgset.getAllTranslationMessages().count(),
             ubuntu_item.potmsgset.getAllTranslationMessages().count(),
         )
+
+
+class TestTranslationTemplateSplitterBase:
+
+    layer = ZopelessDatabaseLayer
+
+    def getPOTMsgSetAndTemplateToSplit(self, splitter):
+        return [(tti1.potmsgset, tti1.potemplate)
+                for tti1, tti2 in splitter.findShared()]
+
+    def setUpSharingTemplates(self, other_side=False):
+        """Set up two sharing templates with one sharing message and
+        one non-sharing message in each template."""
+        template1 = self.makePOTemplate()
+        template2 = self.makeSharingTemplate(template1, other_side)
+
+        shared_potmsgset = self.factory.makePOTMsgSet(template1, sequence=1)
+        shared_potmsgset.setSequence(template2, 1)
+
+        # POTMsgSets appearing in only one of the templates are not returned.
+        self.factory.makePOTMsgSet(template1, sequence=2)
+        self.factory.makePOTMsgSet(template2, sequence=2)
+        return template1, template2, shared_potmsgset
+
+    def makePOTemplate(self):
+        raise NotImplementedError('Subclasses should implement this.')
+
+    def makeSharingTemplate(self, template, other_side=False):
+        raise NotImplementedError('Subclasses should implement this.')
+
+    def test_findShared_renamed(self):
+        """Shared POTMsgSets are included for a renamed template."""
+        template1, template2, shared_potmsgset = self.setUpSharingTemplates()
+
+        splitter = TranslationTemplateSplitter(template2)
+        self.assertContentEqual([], splitter.findShared())
+
+        template2.name = 'renamed'
+        self.assertContentEqual(
+            [(shared_potmsgset, template1)],
+            self.getPOTMsgSetAndTemplateToSplit(splitter))
+
+    def test_findShared_moved_product(self):
+        """Moving a template to a different product splits its messages."""
+        template1, template2, shared_potmsgset = self.setUpSharingTemplates()
+
+        splitter = TranslationTemplateSplitter(template2)
+        self.assertContentEqual([], splitter.findShared())
+
+        # Move the template to a different product entirely.
+        template2.productseries = self.factory.makeProduct().development_focus
+        template2.distroseries = None
+        template2.sourcepackagename = None
+        self.assertContentEqual(
+            [(shared_potmsgset, template1)],
+            self.getPOTMsgSetAndTemplateToSplit(splitter))
+
+    def test_findShared_moved_distribution(self):
+        """Moving a template to a different distribution gets it split."""
+        template1, template2, shared_potmsgset = self.setUpSharingTemplates()
+
+        splitter = TranslationTemplateSplitter(template2)
+        self.assertContentEqual([], splitter.findShared())
+
+        # Move the template to a different distribution entirely.
+        sourcepackage = self.factory.makeSourcePackage()
+        template2.distroseries = sourcepackage.distroseries
+        template2.sourcepackagename = sourcepackage.sourcepackagename
+        template2.productseries = None
+        self.assertContentEqual(
+            [(shared_potmsgset, template1)],
+            self.getPOTMsgSetAndTemplateToSplit(splitter))
+
+    def test_findShared_moved_to_nonsharing_target(self):
+        """Moving a template to a target not sharing with the existing
+        upstreams and source package gets it split."""
+        template1, template2, shared_potmsgset = self.setUpSharingTemplates(
+            other_side=True)
+
+        splitter = TranslationTemplateSplitter(template2)
+        self.assertContentEqual([], splitter.findShared())
+
+        # Move the template to a source package that shares with
+        # neither the existing upstream nor the existing package.
+        sourcepackage = self.factory.makeSourcePackage()
+        template2.distroseries = sourcepackage.distroseries
+        template2.sourcepackagename = sourcepackage.sourcepackagename
+        template2.productseries = None
+        self.assertContentEqual(
+            [(shared_potmsgset, template1)],
+            self.getPOTMsgSetAndTemplateToSplit(splitter))
+
+    def test_split_messages(self):
+        """Splitting messages works properly."""
+        template1, template2, shared_potmsgset = self.setUpSharingTemplates()
+
+        splitter = TranslationTemplateSplitter(template2)
+        self.assertContentEqual([], splitter.findShared())
+
+        # Move the template to a different product entirely.
+        template2.productseries = self.factory.makeProduct().development_focus
+        template2.distroseries = None
+        template2.sourcepackagename = None
+
+        other_item, this_item = splitter.findShared()[0]
+
+        splitter.split()
+
+        self.assertNotEqual(other_item.potmsgset, this_item.potmsgset)
+        self.assertEqual(shared_potmsgset, other_item.potmsgset)
+        self.assertNotEqual(shared_potmsgset, this_item.potmsgset)
+
+
+class TestProductTranslationTemplateSplitter(
+    TestCaseWithFactory, TestTranslationTemplateSplitterBase):
+    """Templates in a product get split appropriately."""
+
+    def makePOTemplate(self):
+        return self.factory.makePOTemplate(
+            name='template',
+            side=TranslationSide.UPSTREAM)
+
+    def makeSharingTemplate(self, template, other_side=False):
+        if other_side:
+            template2 = self.factory.makePOTemplate(
+                name='template',
+                side=TranslationSide.UBUNTU)
+            self.factory.makePackagingLink(
+                productseries=template.productseries,
+                distroseries=template2.distroseries,
+                sourcepackagename=template2.sourcepackagename)
+            return template2
+        else:
+            product = template.productseries.product
+            other_series = self.factory.makeProductSeries(product=product)
+            return self.factory.makePOTemplate(name='template',
+                                               productseries=other_series)
+
+
+class TestDistributionTranslationTemplateSplitter(
+    TestCaseWithFactory, TestTranslationTemplateSplitterBase):
+    """Templates in a distribution get split appropriately."""
+
+    def makePOTemplate(self):
+        return self.factory.makePOTemplate(
+            name='template',
+            side=TranslationSide.UBUNTU)
+
+    def makeSharingTemplate(self, template, other_side=False):
+        if other_side:
+            template2 = self.factory.makePOTemplate(
+                name='template',
+                side=TranslationSide.UPSTREAM)
+            self.factory.makePackagingLink(
+                productseries=template2.productseries,
+                distroseries=template.distroseries,
+                sourcepackagename=template.sourcepackagename)
+            return template2
+        else:
+            distro = template.distroseries.distribution
+            other_series = self.factory.makeDistroSeries(distribution=distro)
+            return self.factory.makePOTemplate(
+                name='template',
+                distroseries=other_series,
+                sourcepackagename=template.sourcepackagename)
+
+    def test_findShared_moved_sourcepackage(self):
+        """Moving a template to a different source package gets it split."""
+        template1, template2, shared_potmsgset = self.setUpSharingTemplates()
+
+        splitter = TranslationTemplateSplitter(template2)
+        self.assertContentEqual([], splitter.findShared())
+
+        # Move the template to a different source package inside the
+        # same distroseries.
+        sourcepackage = self.factory.makeSourcePackage(
+            distroseries=template2.distroseries)
+        template2.sourcepackagename = sourcepackage.sourcepackagename
+        self.assertContentEqual(
+            [(shared_potmsgset, template1)],
+            self.getPOTMsgSetAndTemplateToSplit(splitter))

=== modified file 'lib/lp/translations/translationmerger.py'
--- lib/lp/translations/translationmerger.py	2011-05-27 21:12:25 +0000
+++ lib/lp/translations/translationmerger.py	2011-09-06 07:51:36 +0000
@@ -387,6 +387,26 @@
             merger = cls(templates, tm)
             merger.mergePOTMsgSets()
 
+    @classmethod
+    def mergeModifiedTemplates(cls, potemplate, tm):
+        subset = getUtility(IPOTemplateSet).getSharingSubset(
+            distribution=potemplate.distribution,
+            sourcepackagename=potemplate.sourcepackagename,
+            product=potemplate.product)
+        templates = list(subset.getSharingPOTemplates(potemplate.name))
+        templates.sort(key=methodcaller('sharingKey'), reverse=True)
+        merger = cls(templates, tm)
+        merger.mergeAll()
+
+    def mergeAll(self):
+        """Properly merge POTMsgSets and TranslationMessages."""
+        self._removeDuplicateMessages()
+        self.tm.endTransaction(intermediate=True)
+        self.mergePOTMsgSets()
+        self.tm.endTransaction(intermediate=True)
+        self.mergeTranslationMessages()
+        self.tm.endTransaction()
+
     def __init__(self, potemplates, tm):
         """Constructor.
 
@@ -548,7 +568,6 @@
             deletions = 0
             order_check.check(template)
             potmsgset_ids = self._getPOTMsgSetIds(template)
-            total_ids = len(potmsgset_ids)
             for potmsgset_id in potmsgset_ids:
                 potmsgset = POTMsgSet.get(potmsgset_id)
 

=== modified file 'lib/lp/translations/utilities/translationsplitter.py'
--- lib/lp/translations/utilities/translationsplitter.py	2011-05-12 20:21:58 +0000
+++ lib/lp/translations/utilities/translationsplitter.py	2011-09-06 07:51:36 +0000
@@ -6,50 +6,30 @@
 
 import logging
 
-from storm.locals import ClassAlias, Store
+from storm.expr import (
+    And,
+    Join,
+    LeftJoin,
+    Not,
+    Or,
+    )
+from storm.locals import (
+    ClassAlias,
+    Store,
+    )
 import transaction
 
+from lp.registry.model.distroseries import DistroSeries
+from lp.registry.model.packaging import Packaging
+from lp.registry.model.productseries import ProductSeries
 from lp.translations.model.potemplate import POTemplate
 from lp.translations.model.translationtemplateitem import (
     TranslationTemplateItem,
     )
 
 
-class TranslationSplitter:
-    """Split translations for a productseries, sourcepackage pair.
-
-    If a productseries and sourcepackage were linked in error, and then
-    unlinked, they may still share some translations.  This class breaks those
-    associations.
-    """
-
-    def __init__(self, productseries, sourcepackage):
-        """Constructor.
-
-        :param productseries: The `ProductSeries` to split from.
-        :param sourcepackage: The `SourcePackage` to split from.
-        """
-        self.productseries = productseries
-        self.sourcepackage = sourcepackage
-
-    def findShared(self):
-        """Provide tuples of upstream, ubuntu for each shared POTMsgSet."""
-        store = Store.of(self.productseries)
-        UpstreamItem = ClassAlias(TranslationTemplateItem, 'UpstreamItem')
-        UpstreamTemplate = ClassAlias(POTemplate, 'UpstreamTemplate')
-        UbuntuItem = ClassAlias(TranslationTemplateItem, 'UbuntuItem')
-        UbuntuTemplate = ClassAlias(POTemplate, 'UbuntuTemplate')
-        return store.find(
-            (UpstreamItem, UbuntuItem),
-            UpstreamItem.potmsgsetID == UbuntuItem.potmsgsetID,
-            UbuntuItem.potemplateID == UbuntuTemplate.id,
-            UbuntuTemplate.sourcepackagenameID ==
-                self.sourcepackage.sourcepackagename.id,
-            UbuntuTemplate.distroseriesID ==
-                self.sourcepackage.distroseries.id,
-            UpstreamItem.potemplateID == UpstreamTemplate.id,
-            UpstreamTemplate.productseriesID == self.productseries.id,
-        )
+class TranslationSplitterBase:
+    """Base class for translation splitting jobs."""
 
     @staticmethod
     def splitPOTMsgSet(ubuntu_item):
@@ -86,9 +66,151 @@
         """Split the translations for the ProductSeries and SourcePackage."""
         logger = logging.getLogger()
         shared = enumerate(self.findShared(), 1)
+        total = 0
         for num, (upstream_item, ubuntu_item) in shared:
             self.splitPOTMsgSet(ubuntu_item)
             self.migrateTranslations(upstream_item.potmsgset, ubuntu_item)
             if num % 100 == 0:
                 logger.info('%d entries split.  Committing...', num)
                 transaction.commit()
+            total = num
+
+        if total % 100 != 0 or total == 0:
+            transaction.commit()
+            logger.info('%d entries split.', total)
+
+
+class TranslationSplitter(TranslationSplitterBase):
+    """Split translations for a productseries, sourcepackage pair.
+
+    If a productseries and sourcepackage were linked in error, and then
+    unlinked, they may still share some translations.  This class breaks those
+    associations.
+    """
+
+    def __init__(self, productseries, sourcepackage):
+        """Constructor.
+
+        :param productseries: The `ProductSeries` to split from.
+        :param sourcepackage: The `SourcePackage` to split from.
+        """
+        self.productseries = productseries
+        self.sourcepackage = sourcepackage
+
+    def findShared(self):
+        """Provide tuples of upstream, ubuntu for each shared POTMsgSet."""
+        store = Store.of(self.productseries)
+        UpstreamItem = ClassAlias(TranslationTemplateItem, 'UpstreamItem')
+        UpstreamTemplate = ClassAlias(POTemplate, 'UpstreamTemplate')
+        UbuntuItem = ClassAlias(TranslationTemplateItem, 'UbuntuItem')
+        UbuntuTemplate = ClassAlias(POTemplate, 'UbuntuTemplate')
+        return store.find(
+            (UpstreamItem, UbuntuItem),
+            UpstreamItem.potmsgsetID == UbuntuItem.potmsgsetID,
+            UbuntuItem.potemplateID == UbuntuTemplate.id,
+            UbuntuTemplate.sourcepackagenameID ==
+                self.sourcepackage.sourcepackagename.id,
+            UbuntuTemplate.distroseriesID ==
+                self.sourcepackage.distroseries.id,
+            UpstreamItem.potemplateID == UpstreamTemplate.id,
+            UpstreamTemplate.productseriesID == self.productseries.id,
+        )
+
+
+class TranslationTemplateSplitter(TranslationSplitterBase):
+    """Split translations for an extracted potemplate.
+
+    When a POTemplate is removed from a set of sharing templates,
+    it keeps sharing POTMsgSets with other templates.  This class
+    removes those associations.
+    """
+
+    def __init__(self, potemplate):
+        """Constructor.
+
+        :param potemplate: The `POTemplate` to sanitize.
+        """
+        self.potemplate = potemplate
+
+    def findShared(self):
+        """Provide tuples of (other, this) items for each shared POTMsgSet.
+
+        Only return those that are shared but shouldn't be because they
+        are now in non-sharing templates.
+        """
+        store = Store.of(self.potemplate)
+        ThisItem = ClassAlias(TranslationTemplateItem, 'ThisItem')
+        OtherItem = ClassAlias(TranslationTemplateItem, 'OtherItem')
+        OtherTemplate = ClassAlias(POTemplate, 'OtherTemplate')
+
+        tables = [
+            OtherTemplate,
+            Join(OtherItem, OtherItem.potemplateID == OtherTemplate.id),
+            Join(ThisItem,
+                 And(ThisItem.potmsgsetID == OtherItem.potmsgsetID,
+                     ThisItem.potemplateID == self.potemplate.id)),
+            ]
+
+        if self.potemplate.productseries is not None:
+            # If the template is now in a product, we look for all
+            # effectively sharing templates that are in *different*
+            # products, or that are in a sourcepackage which is not
+            # linked (through Packaging table) with this product.
+            ps = self.potemplate.productseries
+            productseries_join = LeftJoin(
+                ProductSeries,
+                ProductSeries.id == OtherTemplate.productseriesID)
+            packaging_join = LeftJoin(
+                Packaging,
+                And(Packaging.productseriesID == ps.id,
+                    (Packaging.sourcepackagenameID ==
+                     OtherTemplate.sourcepackagenameID),
+                    Packaging.distroseriesID == OtherTemplate.distroseriesID
+                    ))
+            tables.extend([productseries_join, packaging_join])
+            # Template should not be sharing if...
+            other_clauses = Or(
+                # The name is different, or...
+                OtherTemplate.name != self.potemplate.name,
+                # It's in a different product, or...
+                And(Not(ProductSeries.id == None),
+                    ProductSeries.productID != ps.productID),
+                # There is no link between this product series and
+                # a source package the template is in.
+                And(Not(OtherTemplate.distroseriesID == None),
+                    Packaging.id == None))
+        else:
+            # If the template is now in a source package, we look for all
+            # effectively sharing templates that are in *different*
+            # distributions or source packages, or that are in a product
+            # which is not linked with this source package.
+            ds = self.potemplate.distroseries
+            spn = self.potemplate.sourcepackagename
+            distroseries_join = LeftJoin(
+                DistroSeries,
+                DistroSeries.id == OtherTemplate.distroseriesID)
+            packaging_join = LeftJoin(
+                Packaging,
+                And(Packaging.distroseriesID == ds.id,
+                    Packaging.sourcepackagenameID == spn.id,
+                    Packaging.productseriesID == OtherTemplate.productseriesID
+                    ))
+            tables.extend([distroseries_join, packaging_join])
+            # Template should not be sharing if...
+            other_clauses = Or(
+                # The name is different, or...
+                OtherTemplate.name != self.potemplate.name,
+                # It's in a different distribution or source package, or...
+                And(Not(DistroSeries.id == None),
+                    Or(DistroSeries.distributionID != ds.distributionID,
+                       OtherTemplate.sourcepackagenameID != spn.id)),
+                # There is no link between this source package and
+                # a product the template is in.
+                And(Not(OtherTemplate.productseriesID == None),
+                    Packaging.id == None))
+
+        return store.using(*tables).find(
+            (OtherItem, ThisItem),
+            OtherTemplate.id != self.potemplate.id,
+            other_clauses,
+            )


Follow ups