launchpad-reviewers team mailing list archive

[Merge] lp:~wgrant/launchpad/smaller-publisher-oopses into lp:launchpad

William Grant has proposed merging lp:~wgrant/launchpad/smaller-publisher-oopses into lp:launchpad.

Commit message:
Use a per-archive OOPS timeline in archivepublisher scripts.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~wgrant/launchpad/smaller-publisher-oopses/+merge/306074

Use a per-archive OOPS timeline in archivepublisher scripts.

Previously SQL queries weren't logged, just librarian and co. requests,
and the timeline covered the entire script run. This resulted in
OOPSes whose timelines were 99.99% irrelevant yet omitted the
important SQL bits, and it has caused rabbitmq to ENOSPC on at least
one occasion.

SQL queries are now logged, and the timeline is reset between archives
in process-accepted, publish-distro and process-death-row.
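
The pattern is the same in each script: open a fresh timeline just before
an archive is processed, and tear it down in a finally: block so a failure
in one archive can't leak its timeline into the next. Roughly like this
(process_archives and do_one_archive are illustrative placeholders;
set_request_started, clear_request_started and LimitedList are the real
helpers used in the diff below):

    from lp.services.limitedlist import LimitedList
    from lp.services.webapp.adapter import (
        clear_request_started,
        set_request_started,
        )


    def process_archives(archives, txn, do_one_archive):
        """Illustrative only: give each archive its own OOPS timeline."""
        for archive in archives:
            # Start a fresh timeline for this archive alone.  Capping the
            # statement log at 10000 entries bounds the size of any
            # resulting OOPS, and enable_timeout=False leaves these
            # long-running scripts' queries free of request timeouts.
            set_request_started(
                request_statements=LimitedList(10000),
                txn=txn, enable_timeout=False)
            try:
                do_one_archive(archive)
            finally:
                # Always clear, even if processing failed, so one
                # archive's timeline can't bleed into the next one's
                # OOPS report.
                clear_request_started()

The finally: matters most in process-death-row, where death_row.reap()
runs inside a try/except and an error must not leave the previous
archive's timeline running.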
-- 
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~wgrant/launchpad/smaller-publisher-oopses into lp:launchpad.
=== modified file 'lib/lp/archivepublisher/scripts/processaccepted.py'
--- lib/lp/archivepublisher/scripts/processaccepted.py	2014-08-09 19:45:00 +0000
+++ lib/lp/archivepublisher/scripts/processaccepted.py	2016-09-19 10:57:46 +0000
@@ -15,6 +15,11 @@
 
 from lp.archivepublisher.publishing import GLOBAL_PUBLISHER_LOCK
 from lp.archivepublisher.scripts.base import PublisherScript
+from lp.services.limitedlist import LimitedList
+from lp.services.webapp.adapter import (
+    clear_request_started,
+    set_request_started,
+    )
 from lp.services.webapp.errorlog import (
     ErrorReportingUtility,
     ScriptRequest,
@@ -105,22 +110,28 @@
         """
         processed_queue_ids = []
         for archive in self.getTargetArchives(distribution):
-            for distroseries in distribution.series:
-
-                self.logger.debug("Processing queue for %s %s" % (
-                    archive.reference, distroseries.name))
-
-                queue_items = distroseries.getPackageUploads(
-                    status=PackageUploadStatus.ACCEPTED, archive=archive)
-                for queue_item in queue_items:
-                    if self.processQueueItem(queue_item):
-                        processed_queue_ids.append(queue_item.id)
-                    # Commit even on error; we may have altered the
-                    # on-disk archive, so the partial state must
-                    # make it to the DB.
-                    self.txn.commit()
-                    close_bugs_for_queue_item(queue_item)
-                    self.txn.commit()
+            set_request_started(
+                request_statements=LimitedList(10000),
+                txn=self.txn, enable_timeout=False)
+            try:
+                for distroseries in distribution.series:
+
+                    self.logger.debug("Processing queue for %s %s" % (
+                        archive.reference, distroseries.name))
+
+                    queue_items = distroseries.getPackageUploads(
+                        status=PackageUploadStatus.ACCEPTED, archive=archive)
+                    for queue_item in queue_items:
+                        if self.processQueueItem(queue_item):
+                            processed_queue_ids.append(queue_item.id)
+                        # Commit even on error; we may have altered the
+                        # on-disk archive, so the partial state must
+                        # make it to the DB.
+                        self.txn.commit()
+                        close_bugs_for_queue_item(queue_item)
+                        self.txn.commit()
+            finally:
+                clear_request_started()
         return processed_queue_ids
 
     def main(self):

=== modified file 'lib/lp/archivepublisher/scripts/processdeathrow.py'
--- lib/lp/archivepublisher/scripts/processdeathrow.py	2014-08-09 19:45:00 +0000
+++ lib/lp/archivepublisher/scripts/processdeathrow.py	2016-09-19 10:57:46 +0000
@@ -17,6 +17,11 @@
 
 from lp.archivepublisher.deathrow import getDeathRow
 from lp.archivepublisher.scripts.base import PublisherScript
+from lp.services.limitedlist import LimitedList
+from lp.services.webapp.adapter import (
+    clear_request_started,
+    set_request_started,
+    )
 
 
 class DeathRowProcessor(PublisherScript):
@@ -58,6 +63,9 @@
             archive, self.logger, self.options.pool_root)
         self.logger.debug(
             "Unpublishing death row for %s." % archive.displayname)
+        set_request_started(
+            request_statements=LimitedList(10000),
+            txn=self.txn, enable_timeout=False)
         try:
             death_row.reap(self.options.dry_run)
         except Exception:
@@ -71,3 +79,5 @@
             else:
                 self.logger.debug("Committing")
                 self.txn.commit()
+        finally:
+            clear_request_started()

=== modified file 'lib/lp/archivepublisher/scripts/publishdistro.py'
--- lib/lp/archivepublisher/scripts/publishdistro.py	2016-03-30 09:40:38 +0000
+++ lib/lp/archivepublisher/scripts/publishdistro.py	2016-09-19 10:57:46 +0000
@@ -19,7 +19,12 @@
     GLOBAL_PUBLISHER_LOCK,
     )
 from lp.archivepublisher.scripts.base import PublisherScript
+from lp.services.limitedlist import LimitedList
 from lp.services.scripts.base import LaunchpadScriptFailure
+from lp.services.webapp.adapter import (
+    clear_request_started,
+    set_request_started,
+    )
 from lp.soyuz.enums import (
     ArchivePurpose,
     ArchiveStatus,
@@ -331,23 +336,30 @@
         for distribution in self.findDistros():
             allowed_suites = self.findAllowedSuites(distribution)
             for archive in self.getTargetArchives(distribution):
-                if archive.status == ArchiveStatus.DELETING:
-                    publisher = self.getPublisher(
-                        distribution, archive, allowed_suites)
-                    work_done = self.deleteArchive(archive, publisher)
-                elif archive.can_be_published:
-                    publisher = self.getPublisher(
-                        distribution, archive, allowed_suites)
-                    for suite in self.options.dirty_suites:
-                        distroseries, pocket = self.findSuite(
-                            distribution, suite)
-                        if not cannot_modify_suite(
-                                archive, distroseries, pocket):
-                            publisher.markPocketDirty(distroseries, pocket)
-                    self.publishArchive(archive, publisher)
-                    work_done = True
-                else:
-                    work_done = False
+                set_request_started(
+                    request_statements=LimitedList(10000),
+                    txn=self.txn, enable_timeout=False)
+                try:
+                    if archive.status == ArchiveStatus.DELETING:
+                        publisher = self.getPublisher(
+                            distribution, archive, allowed_suites)
+                        work_done = self.deleteArchive(archive, publisher)
+                    elif archive.can_be_published:
+                        publisher = self.getPublisher(
+                            distribution, archive, allowed_suites)
+                        for suite in self.options.dirty_suites:
+                            distroseries, pocket = self.findSuite(
+                                distribution, suite)
+                            if not cannot_modify_suite(
+                                    archive, distroseries, pocket):
+                                publisher.markPocketDirty(
+                                    distroseries, pocket)
+                        self.publishArchive(archive, publisher)
+                        work_done = True
+                    else:
+                        work_done = False
+                finally:
+                    clear_request_started()
 
                 if work_done:
                     self.txn.commit()

