← Back to team overview

launchpad-reviewers team mailing list archive

[Merge] lp:~mars/launchpad/test-ghost-update into lp:~launchpad/launchpad/ghost-line

 

Māris Fogels has proposed merging lp:~mars/launchpad/test-ghost-update into lp:~launchpad/launchpad/ghost-line.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)


Test merge into the ghost line
-- 
The attached diff has been truncated due to its size.
https://code.launchpad.net/~mars/launchpad/test-ghost-update/+merge/39419
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~mars/launchpad/test-ghost-update into lp:~launchpad/launchpad/ghost-line.
=== modified file '.bzrignore'
--- .bzrignore	2010-10-18 21:53:28 +0000
+++ .bzrignore	2010-10-27 02:13:03 +0000
@@ -80,3 +80,4 @@
 *.pt.py
 .project
 .pydevproject
+librarian.log

=== modified file 'configs/README.txt'
--- configs/README.txt	2010-10-18 03:29:59 +0000
+++ configs/README.txt	2010-10-27 02:13:03 +0000
@@ -299,10 +299,6 @@
         |    |
         |    + staging-mailman/launchpad-lazr.conf
         |
-        + edge-lazr.conf
-        |    |
-        |    + edge<1-4>/launchpad-lazr.conf
-        |
         + lpnet-lazr.conf
         |    |
         |    + lpnet<1-8>/launchpad-lazr.conf

=== modified file 'configs/development/launchpad-lazr.conf'
--- configs/development/launchpad-lazr.conf	2010-10-21 03:22:06 +0000
+++ configs/development/launchpad-lazr.conf	2010-10-27 02:13:03 +0000
@@ -175,6 +175,7 @@
 [librarian_server]
 root: /var/tmp/fatsam
 launch: True
+logfile: librarian.log
 
 [malone]
 bugmail_error_from_address: noreply@xxxxxxxxxxxxxxxxxx

=== modified file 'daemons/librarian.tac'
--- daemons/librarian.tac	2010-10-20 18:43:29 +0000
+++ daemons/librarian.tac	2010-10-27 02:13:03 +0000
@@ -9,6 +9,8 @@
 from meliae import scanner
 
 from twisted.application import service, strports
+from twisted.internet import reactor
+from twisted.python import log
 from twisted.web import server
 
 from canonical.config import config, dbconfig
@@ -29,10 +31,14 @@
 if config.librarian_server.upstream_host:
     upstreamHost = config.librarian_server.upstream_host
     upstreamPort = config.librarian_server.upstream_port
-    print 'Using upstream librarian http://%s:%d' % (
-        upstreamHost, upstreamPort)
+    reactor.addSystemEventTrigger(
+        'before', 'startup', log.msg,
+        'Using upstream librarian http://%s:%d' %
+        (upstreamHost, upstreamPort))
 else:
     upstreamHost = upstreamPort = None
+    reactor.addSystemEventTrigger(
+        'before', 'startup', log.msg, 'Not using upstream librarian')
 
 application = service.Application('Librarian')
 librarianService = service.IServiceCollection(application)

=== modified file 'database/schema/security.cfg'
--- database/schema/security.cfg	2010-10-22 10:23:44 +0000
+++ database/schema/security.cfg	2010-10-27 02:13:03 +0000
@@ -938,6 +938,7 @@
 public.archive                                  = SELECT
 public.archivepermission                        = SELECT, INSERT
 public.binarypackagebuild                       = SELECT
+public.binarypackagename                        = SELECT
 public.binarypackagepublishinghistory           = SELECT, INSERT
 public.binarypackagerelease                     = SELECT
 public.buildfarmjob                             = SELECT

=== modified file 'lib/canonical/buildd/pottery/intltool.py'
--- lib/canonical/buildd/pottery/intltool.py	2010-06-30 15:46:36 +0000
+++ lib/canonical/buildd/pottery/intltool.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Functions to build PO templates on the build slave."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'check_potfiles_in',

=== modified file 'lib/canonical/config/fixture.py'
--- lib/canonical/config/fixture.py	2010-10-19 19:23:19 +0000
+++ lib/canonical/config/fixture.py	2010-10-27 02:13:03 +0000
@@ -1,7 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
 """Fixtures related to configs.
 
 XXX: Robert Collins 2010-10-20 bug=663454 this is in the wrong namespace.

=== modified file 'lib/canonical/config/schema-lazr.conf'
--- lib/canonical/config/schema-lazr.conf	2010-10-22 10:23:44 +0000
+++ lib/canonical/config/schema-lazr.conf	2010-10-27 02:13:03 +0000
@@ -356,8 +356,8 @@
 bzr_lp_prefix: lp:
 
 # The hosts which may be used to refer to this server's branches in lp: urls
-# The double-comma is used to produce the empty string.
-lp_url_hosts: edge,,production
+# The leading comma is used to produce the empty string.
+lp_url_hosts: ,production
 
 # see [error_reports].
 error_dir: none
@@ -988,10 +988,6 @@
 # datatype: boolean
 is_demo: False
 
-# Should the 'edge system' indicator in the footer be turned on?
-# Should be true for edge and false for all other live systems.
-is_edge: False
-
 # On launchpad.net, Launchpad's version and revision numbers aren't shown,
 # because it's a Web site. Should be True for launchpad.net and False for all
 # other systems.

=== modified file 'lib/canonical/database/sqlbase.py'
--- lib/canonical/database/sqlbase.py	2010-10-03 15:30:06 +0000
+++ lib/canonical/database/sqlbase.py	2010-10-27 02:13:03 +0000
@@ -71,7 +71,7 @@
     dbconfig,
     )
 from canonical.database.interfaces import ISQLBase
-from lp.services.propertycache import IPropertyCacheManager
+from lp.services.propertycache import clear_property_cache
 
 # Default we want for scripts, and the PostgreSQL default. Note psycopg1 will
 # use SERIALIZABLE unless we override, but psycopg2 will not.
@@ -270,7 +270,7 @@
         # XXX: RobertCollins 2010-08-16 bug=622648: Note this is not directly
         # tested, but the entire test suite blows up awesomely if it's broken.
         # It's entirely unclear where tests for this should be.
-        IPropertyCacheManager(self).clear()
+        clear_property_cache(self)
 
 
 alreadyInstalledMsg = ("A ZopelessTransactionManager with these settings is "

=== modified file 'lib/canonical/launchpad/browser/launchpad.py'
--- lib/canonical/launchpad/browser/launchpad.py	2010-10-06 11:46:51 +0000
+++ lib/canonical/launchpad/browser/launchpad.py	2010-10-27 02:13:03 +0000
@@ -527,34 +527,31 @@
         target_url = self.request.getHeader('referer')
         path = '/'.join(self.request.stepstogo)
         try:
-            # first check for a valid branch url
-            try:
-                branch_data = getUtility(IBranchLookup).getByLPPath(path)
-                branch, trailing = branch_data
-                target_url = canonical_url(branch)
-                if trailing is not None:
-                    target_url = urlappend(target_url, trailing)
-
-            except (NoLinkedBranch), e:
-                # a valid ICanHasLinkedBranch target exists but there's no
-                # branch or it's not visible
-
-                # If are aren't arriving at this invalid branch URL from
-                # another page then we just raise an exception, otherwise we
-                # end up in a bad recursion loop. The target url will be None
-                # in that case.
-                if target_url is None:
-                    raise e
-                self.request.response.addNotification(
-                    "The target %s does not have a linked branch." % path)
-
+            branch_data = getUtility(IBranchLookup).getByLPPath(path)
+            branch, trailing = branch_data
+            target_url = canonical_url(branch)
+            if trailing is not None:
+                target_url = urlappend(target_url, trailing)
+        except (NoLinkedBranch), e:
+            # A valid ICanHasLinkedBranch target exists but there's no
+            # branch or it's not visible.
+
+            # If we aren't arriving at this invalid branch URL from
+            # another page then we just raise a NotFoundError to generate
+            # a 404, otherwise we end up in a bad recursion loop. The
+            # target url will be None in that case.
+            if target_url is None:
+                raise NotFoundError
+            self.request.response.addNotification(
+                "The target %s does not have a linked branch." % path)
         except (CannotHaveLinkedBranch, InvalidNamespace,
                 InvalidProductName, NotFoundError), e:
             # If are aren't arriving at this invalid branch URL from another
-            # page then we just raise an exception, otherwise we end up in a
-            # bad recursion loop. The target url will be None in that case.
+            # page then we just raise a NotFoundError to generate a 404,
+            # otherwise we end up in a bad recursion loop. The target url will
+            # be None in that case.
             if target_url is None:
-                raise e
+                raise NotFoundError
             error_msg = str(e)
             if error_msg == '':
                 error_msg = "Invalid branch lp:%s." % path

=== modified file 'lib/canonical/launchpad/browser/tests/test_launchpad.py'
--- lib/canonical/launchpad/browser/tests/test_launchpad.py	2010-10-06 11:46:51 +0000
+++ lib/canonical/launchpad/browser/tests/test_launchpad.py	2010-10-27 02:13:03 +0000
@@ -20,13 +20,14 @@
 from canonical.launchpad.webapp.url import urlappend
 from canonical.testing.layers import DatabaseFunctionalLayer
 from lp.app.errors import GoneError
-from lp.code.errors import NoLinkedBranch
 from lp.code.interfaces.linkedbranch import ICanHasLinkedBranch
 from lp.registry.interfaces.person import (
     IPersonSet,
     PersonVisibility,
     )
 from lp.testing import (
+    ANONYMOUS,
+    login,
     login_person,
     person_logged_in,
     TestCaseWithFactory,
@@ -83,11 +84,6 @@
         self._validateNotificationContext(
             redirection.request, notification, level)
 
-    def assertNoLinkedBranch(self, path, use_default_referer=True):
-        self.assertRaises(
-            NoLinkedBranch, self.traverse, path,
-            use_default_referer=use_default_referer)
-
     def assertNotFound(self, path, use_default_referer=True):
         self.assertRaises(
             NotFound, self.traverse, path,
@@ -128,6 +124,16 @@
 
     layer = DatabaseFunctionalLayer
 
+    def assertDisplaysNotice(self, path, notification):
+        """Assert that traversal redirects back with the specified notice."""
+        self.assertDisplaysNotification(
+            path, notification, BrowserNotificationLevel.NOTICE)
+
+    def assertDisplaysError(self, path, notification):
+        """Assert that traversal redirects back with the specified error."""
+        self.assertDisplaysNotification(
+            path, notification, BrowserNotificationLevel.ERROR)
+
     def traverse(self, path, **kwargs):
         return super(TestBranchTraversal, self).traverse(
             path, '+branch', **kwargs)
@@ -144,25 +150,15 @@
         branch = self.factory.makeAnyBranch()
         bad_name = branch.unique_name + 'wibble'
         requiredMessage = "No such branch: '%s'." % (branch.name+"wibble")
-        self.assertDisplaysNotification(
-            bad_name, requiredMessage,
-            BrowserNotificationLevel.ERROR)
+        self.assertDisplaysError(bad_name, requiredMessage)
 
     def test_private_branch(self):
         # If an attempt is made to access a private branch, display an error.
-        branch = self.factory.makeProductBranch()
-        branch_unique_name = branch.unique_name
-        naked_product = removeSecurityProxy(branch.product)
-        ICanHasLinkedBranch(naked_product).setBranch(branch)
-        removeSecurityProxy(branch).private = True
-
-        any_user = self.factory.makePerson()
-        login_person(any_user)
+        branch = self.factory.makeProductBranch(private=True)
+        branch_unique_name = removeSecurityProxy(branch).unique_name
+        login(ANONYMOUS)
         requiredMessage = "No such branch: '%s'." % branch_unique_name
-        self.assertDisplaysNotification(
-            branch_unique_name,
-            requiredMessage,
-            BrowserNotificationLevel.ERROR)
+        self.assertDisplaysError(branch_unique_name, requiredMessage)
 
     def test_product_alias(self):
         # Traversing to /+branch/<product> redirects to the page for the
@@ -179,23 +175,16 @@
         naked_product = removeSecurityProxy(branch.product)
         ICanHasLinkedBranch(naked_product).setBranch(branch)
         removeSecurityProxy(branch).private = True
-
-        any_user = self.factory.makePerson()
-        login_person(any_user)
-        requiredMessage = (u"The target %s does not have a linked branch."
-            % naked_product.name)
-        self.assertDisplaysNotification(
-            naked_product.name,
-            requiredMessage,
-            BrowserNotificationLevel.NOTICE)
+        login(ANONYMOUS)
+        requiredMessage = (
+            u"The target %s does not have a linked branch." % naked_product.name)
+        self.assertDisplaysNotice(naked_product.name, requiredMessage)
 
     def test_nonexistent_product(self):
         # Traversing to /+branch/<no-such-product> displays an error message.
         non_existent = 'non-existent'
         requiredMessage = u"No such product: '%s'." % non_existent
-        self.assertDisplaysNotification(
-            non_existent, requiredMessage,
-            BrowserNotificationLevel.ERROR)
+        self.assertDisplaysError(non_existent, requiredMessage)
 
     def test_nonexistent_product_without_referer(self):
         # Traversing to /+branch/<no-such-product> without a referer results
@@ -211,22 +200,16 @@
         naked_product = removeSecurityProxy(branch.product)
         ICanHasLinkedBranch(naked_product).setBranch(branch)
         removeSecurityProxy(branch).private = True
-
-        any_user = self.factory.makePerson()
-        login_person(any_user)
-        self.assertNoLinkedBranch(
-            naked_product.name, use_default_referer=False)
+        login(ANONYMOUS)
+        self.assertNotFound(naked_product.name, use_default_referer=False)
 
     def test_product_without_dev_focus(self):
         # Traversing to a product without a development focus displays a
         # user message on the same page.
         product = self.factory.makeProduct()
-        requiredMessage = (u"The target %s does not have a linked branch."
-            % product.name)
-        self.assertDisplaysNotification(
-            product.name,
-            requiredMessage,
-            BrowserNotificationLevel.NOTICE)
+        requiredMessage = (
+            u"The target %s does not have a linked branch." % product.name)
+        self.assertDisplaysNotice(product.name, requiredMessage)
 
     def test_distro_package_alias(self):
         # Traversing to /+branch/<distro>/<sourcepackage package> redirects
@@ -253,14 +236,11 @@
         registrant = ubuntu_branches.teamowner
         with person_logged_in(registrant):
             ICanHasLinkedBranch(distro_package).setBranch(branch, registrant)
-
-        any_user = self.factory.makePerson()
-        login_person(any_user)
+        login(ANONYMOUS)
         path = ICanHasLinkedBranch(distro_package).bzr_path
-        requiredMessage = (u"The target %s does not have a linked branch."
-            % path)
-        self.assertDisplaysNotification(
-            path, requiredMessage, BrowserNotificationLevel.NOTICE)
+        requiredMessage = (
+            u"The target %s does not have a linked branch." % path)
+        self.assertDisplaysNotice(path, requiredMessage)
 
     def test_trailing_path_redirect(self):
         # If there are any trailing path segments after the branch identifier,
@@ -283,52 +263,46 @@
         product = self.factory.makeProduct()
         non_existent = 'nonexistent'
         requiredMessage = u"No such product series: '%s'." % non_existent
-        self.assertDisplaysNotification(
-            '%s/%s' % (product.name, non_existent),
-            requiredMessage,
-            BrowserNotificationLevel.ERROR)
+        path = '%s/%s' % (product.name, non_existent)
+        self.assertDisplaysError(path, requiredMessage)
 
     def test_no_branch_for_series(self):
         # If there's no branch for a product series, display a
         # message telling the user there is no linked branch.
         series = self.factory.makeProductSeries()
         path = ICanHasLinkedBranch(series).bzr_path
-        requiredMessage = ("The target %s does not have a linked branch."
-            % path)
-        self.assertDisplaysNotification(
-            path, requiredMessage, BrowserNotificationLevel.NOTICE)
+        requiredMessage = (
+            "The target %s does not have a linked branch." % path)
+        self.assertDisplaysNotice(path, requiredMessage)
 
     def test_private_branch_for_series(self):
         # If the development focus of a product series is private, display a
         # message telling the user there is no linked branch.
         branch = self.factory.makeBranch(private=True)
         series = self.factory.makeProductSeries(branch=branch)
-
-        any_user = self.factory.makePerson()
-        login_person(any_user)
+        login(ANONYMOUS)
         path = ICanHasLinkedBranch(series).bzr_path
-        requiredMessage = (u"The target %s does not have a linked branch."
-            % path)
-        self.assertDisplaysNotification(
-            path, requiredMessage, BrowserNotificationLevel.NOTICE)
+        requiredMessage = (
+            u"The target %s does not have a linked branch." % path)
+        self.assertDisplaysNotice(path, requiredMessage)
 
     def test_too_short_branch_name(self):
         # error notification if the thing following +branch is a unique name
         # that's too short to be a real unique name.
         owner = self.factory.makePerson()
-        requiredMessage = (u"Cannot understand namespace name: '%s'"
-            % owner.name)
-        self.assertDisplaysNotification(
-            '~%s' % owner.name,
-            requiredMessage,
-            BrowserNotificationLevel.ERROR)
+        requiredMessage = (
+            u"Cannot understand namespace name: '%s'" % owner.name)
+        self.assertDisplaysError('~%s' % owner.name, requiredMessage)
 
     def test_invalid_product_name(self):
         # error notification if the thing following +branch has an invalid
         # product name.
-        self.assertDisplaysNotification(
-            'a', u"Invalid name for product: a.",
-            BrowserNotificationLevel.ERROR)
+        self.assertDisplaysError('_foo', u"Invalid name for product: _foo.")
+
+    def test_invalid_product_name_without_referer(self):
+        # A 404 (NotFound) results if the thing following +branch has an
+        # invalid product name and there is no referer to redirect back to.
+        self.assertNotFound("_foo", use_default_referer=False)
 
 
 class TestPersonTraversal(TestCaseWithFactory, TraversalMixin):

=== modified file 'lib/canonical/launchpad/daemons/tachandler.py'
--- lib/canonical/launchpad/daemons/tachandler.py	2010-10-20 18:43:29 +0000
+++ lib/canonical/launchpad/daemons/tachandler.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2009-2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 """Test harness for TAC (Twisted Application Configuration) files."""
 
 __metaclass__ = type

=== modified file 'lib/canonical/launchpad/database/message.py'
--- lib/canonical/launchpad/database/message.py	2010-10-03 15:30:06 +0000
+++ lib/canonical/launchpad/database/message.py	2010-10-27 02:13:03 +0000
@@ -236,8 +236,8 @@
         If the header isn't encoded properly, the characters that can't
         be decoded are replaced with unicode question marks.
 
-            >>> MessageSet()._decode_header('=?utf-8?q?F=F6=F6_b=E4r?=')
-            u'F\ufffd\ufffd'
+            >>> MessageSet()._decode_header('=?utf-8?q?F=F6?=')
+            u'F\ufffd'
         """
         # Unfold the header before decoding it.
         header = ''.join(header.splitlines())

=== modified file 'lib/canonical/launchpad/doc/decoratedresultset.txt'
--- lib/canonical/launchpad/doc/decoratedresultset.txt	2010-10-09 16:36:22 +0000
+++ lib/canonical/launchpad/doc/decoratedresultset.txt	2010-10-27 02:13:03 +0000
@@ -75,7 +75,7 @@
 
 There was a bug in the Storm API whereby calling count (or other aggregates)
 on a storm ResultSet does not respect the distinct
-config option (https://bugs.edge.launchpad.net/storm/+bug/217644):
+config option (https://bugs.launchpad.net/storm/+bug/217644):
 
     >>> from canonical.launchpad.database import (BinaryPackageRelease,
     ...     BinaryPackagePublishingHistory)

=== modified file 'lib/canonical/launchpad/doc/google-searchservice.txt'
--- lib/canonical/launchpad/doc/google-searchservice.txt	2010-10-09 16:36:22 +0000
+++ lib/canonical/launchpad/doc/google-searchservice.txt	2010-10-27 02:13:03 +0000
@@ -300,8 +300,8 @@
     <M>2</M>
     ...
     <R N="1">
-    <U>https://edge.launchpad.net/gc</U>
-    <UE>https://edge.launchpad.net/gc</UE>
+    <U>https://launchpad.net/gc</U>
+    <UE>https://launchpad.net/gc</UE>
     <RK>0</RK>
     <S>
     </S>

=== modified file 'lib/canonical/launchpad/ftests/googlesearches/blog.launchpad.net-feed.xml'
--- lib/canonical/launchpad/ftests/googlesearches/blog.launchpad.net-feed.xml	2010-07-16 20:55:29 +0000
+++ lib/canonical/launchpad/ftests/googlesearches/blog.launchpad.net-feed.xml	2010-10-27 02:13:03 +0000
@@ -54,7 +54,7 @@
     import httplib2
    [...]]]></description>
 			<content:encoded><![CDATA[<p>Three tips from <a href="http://launchpad.net/~leonardr";>Leonard&#8217;s</a> lightning talk in Prague about writing faster <a href="https://help.launchpad.net/API/launchpadlib#preview";>Launchpadlib</a> API clients:</p>
-<p><b>1. Use the latest launchpadlib.</b> It gets faster from one release to the next.  (The versions in the current Ubuntu release should be fine; otherwise run from the <a href="https://code.edge.launchpad.net/~lazr-developers/launchpadlib/trunk";>branch</a> or the latest <a href="https://launchpad.net/launchpadlib/+download";>tarball</a>.)</p>
+<p><b>1. Use the latest launchpadlib.</b> It gets faster from one release to the next.  (The versions in the current Ubuntu release should be fine; otherwise run from the <a href="https://code.launchpad.net/~lazr-developers/launchpadlib/trunk";>branch</a> or the latest <a href="https://launchpad.net/launchpadlib/+download";>tarball</a>.)</p>
 <p><b>2. Profile:</b></p>
 <pre>
     import httplib2

=== modified file 'lib/canonical/launchpad/ftests/googlesearches/googlesearchservice-missing-title.xml'
--- lib/canonical/launchpad/ftests/googlesearches/googlesearchservice-missing-title.xml	2008-05-28 00:48:48 +0000
+++ lib/canonical/launchpad/ftests/googlesearches/googlesearchservice-missing-title.xml	2010-10-27 02:13:03 +0000
@@ -25,8 +25,8 @@
 </NB>
 
 <R N="1">
-<U>https://edge.launchpad.net/gc</U>
-<UE>https://edge.launchpad.net/gc</UE>
+<U>https://launchpad.net/gc</U>
+<UE>https://launchpad.net/gc</UE>
 <RK>0</RK>
 <S>
 </S>

=== modified file 'lib/canonical/launchpad/ftests/keys_for_tests.py'
--- lib/canonical/launchpad/ftests/keys_for_tests.py	2010-10-03 15:30:06 +0000
+++ lib/canonical/launchpad/ftests/keys_for_tests.py	2010-10-27 02:13:03 +0000
@@ -25,16 +25,17 @@
 import gpgme
 from zope.component import getUtility
 
-from canonical.launchpad.interfaces import (
+from canonical.launchpad.interfaces.gpghandler import IGPGHandler
+from lp.registry.interfaces.gpg import (
     GPGKeyAlgorithm,
-    IGPGHandler,
     IGPGKeySet,
-    IPersonSet,
     )
+from lp.registry.interfaces.person import IPersonSet
 
 
 gpgkeysdir = os.path.join(os.path.dirname(__file__), 'gpgkeys')
 
+
 def import_public_key(email_addr):
     """Imports the public key related to the given email address."""
     gpghandler = getUtility(IGPGHandler)
@@ -71,17 +72,20 @@
         algorithm=GPGKeyAlgorithm.items[key.algorithm],
         active=(not key.revoked))
 
+
 def iter_test_key_emails():
     """Iterates over the email addresses for the keys in the gpgkeysdir."""
     for name in sorted(os.listdir(gpgkeysdir), reverse=True):
         if name.endswith('.pub'):
             yield name[:-4]
 
+
 def import_public_test_keys():
     """Imports all the public keys located in gpgkeysdir into the db."""
     for email in iter_test_key_emails():
         import_public_key(email)
 
+
 def import_secret_test_key(keyfile='test@xxxxxxxxxxxxxxxxx'):
     """Imports the secret key located in gpgkeysdir into local keyring.
 
@@ -91,20 +95,24 @@
     seckey = open(os.path.join(gpgkeysdir, keyfile)).read()
     return gpghandler.importSecretKey(seckey)
 
+
 def test_pubkey_file_from_email(email_addr):
     """Get the file name for a test pubkey by email address."""
     return os.path.join(gpgkeysdir, email_addr + '.pub')
 
+
 def test_pubkey_from_email(email_addr):
     """Get the on disk content for a test pubkey by email address."""
     return open(test_pubkey_file_from_email(email_addr)).read()
 
+
 def test_keyrings():
     """Iterate over the filenames for test keyrings."""
     for name in os.listdir(gpgkeysdir):
         if name.endswith('.gpg'):
             yield os.path.join(gpgkeysdir, name)
 
+
 def decrypt_content(content, password):
     """Return the decrypted content or None if failed
 

=== modified file 'lib/canonical/launchpad/ftests/test_system_documentation.py'
--- lib/canonical/launchpad/ftests/test_system_documentation.py	2010-10-11 18:29:14 +0000
+++ lib/canonical/launchpad/ftests/test_system_documentation.py	2010-10-27 02:13:03 +0000
@@ -7,8 +7,6 @@
 """
 # pylint: disable-msg=C0103
 
-from __future__ import with_statement
-
 import logging
 import os
 import unittest
@@ -23,11 +21,6 @@
     ANONYMOUS,
     login,
     )
-from canonical.launchpad.interfaces import (
-    IDistributionSet,
-    ILanguageSet,
-    IPersonSet,
-    )
 from canonical.launchpad.testing import browser
 from canonical.launchpad.testing.systemdocs import (
     LayeredDocFileSuite,
@@ -47,6 +40,9 @@
     )
 from lp.bugs.interfaces.bug import CreateBugParams
 from lp.bugs.interfaces.bugtask import IBugTaskSet
+from lp.registry.interfaces.distribution import IDistributionSet
+from lp.registry.interfaces.person import IPersonSet
+from lp.services.worlddata.interfaces.language import ILanguageSet
 from lp.testing.mail_helpers import pop_notifications
 
 
@@ -68,7 +64,7 @@
     validity.
     """
     from canonical.launchpad.database import EmailAddress
-    from canonical.launchpad.interfaces import EmailAddressStatus
+    from canonical.launchpad.interfaces.emailaddress import EmailAddressStatus
     stevea_emailaddress = EmailAddress.byEmail(
             'steve.alexander@xxxxxxxxxxxxxxx')
     stevea_emailaddress.status = EmailAddressStatus.NEW
@@ -360,8 +356,6 @@
         one_test = LayeredDocFileSuite(
             path, setUp=setUp, tearDown=tearDown,
             layer=LaunchpadFunctionalLayer,
-            # 'icky way of running doctests with __future__ imports
-            globs={'with_statement': with_statement},
             stdout_logging_level=logging.WARNING)
         suite.addTest(one_test)
 

=== modified file 'lib/canonical/launchpad/mail/errortemplates/oops.txt'
--- lib/canonical/launchpad/mail/errortemplates/oops.txt	2008-02-13 13:32:25 +0000
+++ lib/canonical/launchpad/mail/errortemplates/oops.txt	2010-10-27 02:13:03 +0000
@@ -3,5 +3,5 @@
 Apologies for the inconvenience.
 
 If this is blocking your work, please file a question at
-https://answers.edge.launchpad.net/launchpad/+addquestion
+https://answers.launchpad.net/launchpad/+addquestion
 and include the error ID %(oops_id)s in the description.

=== modified file 'lib/canonical/launchpad/mail/tests/test_handlers.py'
--- lib/canonical/launchpad/mail/tests/test_handlers.py	2010-10-12 01:11:41 +0000
+++ lib/canonical/launchpad/mail/tests/test_handlers.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2009 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from doctest import DocTestSuite

=== modified file 'lib/canonical/launchpad/pagetests/basics/demo-and-lpnet.txt'
--- lib/canonical/launchpad/pagetests/basics/demo-and-lpnet.txt	2010-06-16 19:07:12 +0000
+++ lib/canonical/launchpad/pagetests/basics/demo-and-lpnet.txt	2010-10-27 02:13:03 +0000
@@ -51,7 +51,7 @@
     <style...url(/@@/demo)...</style>
     ...
     >>> print extract_text(find_tag_by_id(browser.contents, 'lp-version'))
-    &bull; r... devmode demo site
+    &bull; r... devmode demo site (get the code)
 
     >>> print extract_text(find_tags_by_class(
     ...     browser.contents, 'sitemessage')[0])
@@ -70,94 +70,6 @@
 
     >>> browser.open('http://launchpad.dev/ubuntu')
     >>> print extract_text(find_tag_by_id(browser.contents, 'lp-version'))
-    &bull; r... devmode
+    &bull; r... devmode (get the code)
     >>> len(find_tags_by_class(browser.contents, 'sitemessage'))
     0
-
-
-== Launchpad Edge ==
-
-Additionally, when a server is running as an edge server, the site-message
-is appended with a link to disable edge redirects.
-
-In addition to this prominent display on the root page, most pages will
-also include the disable-redirect link in the site_message - if the
-user is a member of the beta group and has not already disabled
-the redirects.
-
-    # Now setup an edge site-message config and re-check.
-    >>> edge_config_data = """
-    ...     [launchpad]
-    ...     site_message: This is a beta site.
-    ...     is_edge: True
-    ...     """
-    >>> config.push('edge_config_data', edge_config_data)
-    >>> beta_browser = setupBrowser(
-    ...     auth='Basic beta-admin@xxxxxxxxxxxxx:test')
-    >>> beta_browser.open('http://launchpad.dev/ubuntu')
-    >>> site_message = find_tags_by_class(
-    ...     beta_browser.contents, 'sitemessage')[0]
-    >>> print extract_text(site_message)
-    This is a beta site. Disable edge redirect.
-    >>> print extract_text(site_message.find(
-    ...     'a', onclick="setBetaRedirect(false)"))
-    Disable edge redirect.
-
-The disable-redirect link will also appear in the site_message when browsed by
-non-beta/anonymous users. This is to reduce the annoyance when users are
-logged into launchpad.net but haven't noticed yet that they need to log into
-edge as well (https://launchpad.net/bugs/160191).
-
-    >>> browser.open('http://launchpad.dev/ubuntu')
-    >>> print extract_text(find_tags_by_class(
-    ...     browser.contents, 'sitemessage')[0])
-    This is a beta site. Disable edge redirect.
-
-Once the redirection has been inhibited, the link changes to enable
-redirects.
-
-    # Workaround bug in mechanize where you cannot use the Cookie
-    # header with the CookieJar
-    >>> from mechanize._clientcookie import Cookie
-    >>> cookiejar = (
-    ...     beta_browser.mech_browser._ua_handlers['_cookies'].cookiejar)
-    >>> cookiejar.set_cookie(
-    ...     Cookie(
-    ...         version=0, name='inhibit_beta_redirect', value='1', port=None,
-    ...         port_specified=False, domain='.launchpad.dev',
-    ...         domain_specified=True, domain_initial_dot=True, path='/',
-    ...         path_specified=True, secure=False, expires=None,
-    ...         discard=None, comment=None, comment_url=None, rest={}))
-    >>> beta_browser.open('http://launchpad.dev/ubuntu')
-    >>> site_message = find_tags_by_class(
-    ...     beta_browser.contents, 'sitemessage')[0]
-    >>> print extract_text(site_message)
-    This is a beta site. Enable edge redirect.
-    >>> print extract_text(site_message.find(
-    ...     'a', onclick="setBetaRedirect(true)"))
-    Enable edge redirect.
-
-    # Remove the specific site-message config data before continuing.
-    >>> dummy = config.pop('edge_config_data')
-
-
-== Launchpad.net ==
-
-On launchpad.net, the version and revision numbers are presented only in an
-HTML comment.
-
-    >>> # Pretend that we're on launchpad.net:
-    >>> test_data = dedent("""
-    ...     [launchpad]
-    ...     is_lpnet: True
-    ...     """)
-    >>> config.push('test_data', test_data)
-
-    >>> browser.open('http://launchpad.dev/ubuntu')
-    >>> print find_tag_by_id(browser.contents, 'lp-version')
-    None
-
-    >>> # Restore the previous config:
-    >>> config_data = config.pop('test_data')
-    >>> print config.launchpad.is_lpnet
-    False

=== modified file 'lib/canonical/launchpad/pagetests/standalone/xx-offsite-form-post.txt'
--- lib/canonical/launchpad/pagetests/standalone/xx-offsite-form-post.txt	2010-03-30 14:33:26 +0000
+++ lib/canonical/launchpad/pagetests/standalone/xx-offsite-form-post.txt	2010-10-27 02:13:03 +0000
@@ -45,7 +45,7 @@
 It also fails if there is no referrer.
 
 Note that we have to set up a monkeypatch to test this in order to work
-around Zope bug 98437 (https://bugs.edge.launchpad.net/zope3/+bug/98437).
+around Zope bug 98437 (https://bugs.launchpad.net/zope3/+bug/98437).
 
   >>> from canonical.launchpad.webapp.servers import LaunchpadBrowserRequest
   >>> original_init = LaunchpadBrowserRequest.__init__
@@ -87,7 +87,7 @@
   No REFERER Header
   ...
   >>> browser.getLink('the FAQ').url
-  'https://answers.edge.launchpad.net/launchpad/+faq/1024'
+  'https://answers.launchpad.net/launchpad/+faq/1024'
   >>> browser.handleErrors = False
 
 We have a few exceptional cases in which we allow POST requests without a

=== modified file 'lib/canonical/launchpad/readonly.py'
--- lib/canonical/launchpad/readonly.py	2010-08-20 20:31:18 +0000
+++ lib/canonical/launchpad/readonly.py	2010-10-27 02:13:03 +0000
@@ -7,8 +7,6 @@
 named read-only.txt in the root of the Launchpad tree.
 """
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'is_read_only',

=== modified file 'lib/canonical/launchpad/scripts/ftests/test_keyringtrustanalyser.py'
--- lib/canonical/launchpad/scripts/ftests/test_keyringtrustanalyser.py	2010-10-04 19:50:45 +0000
+++ lib/canonical/launchpad/scripts/ftests/test_keyringtrustanalyser.py	2010-10-27 02:13:03 +0000
@@ -8,13 +8,13 @@
 from zope.component import getUtility
 
 from canonical.launchpad.ftests import keys_for_tests
-from canonical.launchpad.interfaces import (
+from canonical.launchpad.interfaces.emailaddress import (
     EmailAddressStatus,
     IEmailAddressSet,
-    IGPGHandler,
-    IPersonSet,
     )
+from canonical.launchpad.interfaces.gpghandler import IGPGHandler
 from canonical.testing.layers import LaunchpadZopelessLayer
+from lp.registry.interfaces.person import IPersonSet
 from lp.registry.scripts.keyringtrustanalyser import (
     addOtherKeyring,
     addTrustedKeyring,
@@ -29,9 +29,11 @@
 
 
 class LogCollector(logging.Handler):
+
     def __init__(self):
         logging.Handler.__init__(self)
         self.records = []
+
     def emit(self, record):
         self.records.append(self.format(record))
 

=== modified file 'lib/canonical/launchpad/scripts/oops.py'
--- lib/canonical/launchpad/scripts/oops.py	2010-09-09 06:04:24 +0000
+++ lib/canonical/launchpad/scripts/oops.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Module docstring goes here."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 __all__ = [

=== modified file 'lib/canonical/launchpad/templates/launchpad-noreferrer.pt'
--- lib/canonical/launchpad/templates/launchpad-noreferrer.pt	2010-03-29 16:11:05 +0000
+++ lib/canonical/launchpad/templates/launchpad-noreferrer.pt	2010-10-27 02:13:03 +0000
@@ -15,7 +15,7 @@
       <code>REFERER</code> headers.</p>
       <p>Unblock <code>REFERER</code> headers for launchpad.net and try
       again, or see <a
-      href="https://answers.edge.launchpad.net/launchpad/+faq/1024";>the
+      href="https://answers.launchpad.net/launchpad/+faq/1024";>the
       FAQ <em>Why does Launchpad require a REFERER header?</em></a> for
       more information.</p>
       <p>You can also join <a href="irc://irc.freenode.net/launchpad">the

=== modified file 'lib/canonical/launchpad/templates/launchpad-requestexpired.pt'
--- lib/canonical/launchpad/templates/launchpad-requestexpired.pt	2010-03-10 19:10:04 +0000
+++ lib/canonical/launchpad/templates/launchpad-requestexpired.pt	2010-10-27 02:13:03 +0000
@@ -9,16 +9,6 @@
   <body>
     <div class="top-portlet" metal:fill-slot="main">
       <h1 class="exception">Timeout error</h1>
-      <div tal:condition="is_edge"
-           id="redirect_notice" class="informational message">
-        <p>Our edge server has a lower timeout threshold than launchpad.net,
-        so we can catch those before they hit a wider audience.
-        If this is blocking your work and you are a member of the Launchpad
-        Beta Testers team, you can disable automatic redirection
-        to edge in order to use launchpad.net.</p>
-        <p><button onclick="setBetaRedirect(false)">Disable redirection
-        for 2 hours</button></p>
-      </div>
       <p>
         Sorry, something just went wrong in Launchpad.
       </p>

=== modified file 'lib/canonical/launchpad/templates/oops.pt'
--- lib/canonical/launchpad/templates/oops.pt	2010-03-15 17:58:27 +0000
+++ lib/canonical/launchpad/templates/oops.pt	2010-10-27 02:13:03 +0000
@@ -31,26 +31,6 @@
           <tal:oops replace="structure view/oops_id_text" />)
         </p>
 
-        <div id="redirect_notice" style="display:none"
-             class="informational message">
-          <p>This server runs pre-release code, so it's possible this problem
-          doesn't affect launchpad.net. If you're a member of the Launchpad
-          Beta Testers team, you can disable redirection in order to use
-          launchpad.net.</p>
-          <p><button onclick="setBetaRedirect(false)">Disable redirection
-          for 2 hours</button></p>
-        </div>
-        <tal:comment condition="nothing">
-          Can't use the 'is_edge' global because we don't use our page
-          macros here.
-        </tal:comment>
-        <script type="text/javascript">
-          if (document.location.hostname.match('edge.')) {
-            redirect_notice_div = document.getElementById('redirect_notice');
-            redirect_notice_div.style.display = "block";
-          };
-        </script>
-
         <tal:traceback replace="structure view/maybeShowTraceback" />
 
         <div class="related">

=== modified file 'lib/canonical/launchpad/testing/browser.py'
--- lib/canonical/launchpad/testing/browser.py	2010-08-20 20:31:18 +0000
+++ lib/canonical/launchpad/testing/browser.py	2010-10-27 02:13:03 +0000
@@ -104,7 +104,7 @@
     def _clickSubmit(self, form, control, coord):
         # XXX gary 2010-03-08 bug=98437
         # This change is taken from
-        # https://bugs.edge.launchpad.net/zope3/+bug/98437/comments/9 .  It
+        # https://bugs.launchpad.net/zope3/+bug/98437/comments/9 .  It
         # should be pushed upstream, per that comment.
         labels = control.get_labels()
         if labels:

=== modified file 'lib/canonical/launchpad/testing/pages.py'
--- lib/canonical/launchpad/testing/pages.py	2010-10-12 05:32:24 +0000
+++ lib/canonical/launchpad/testing/pages.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2009 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 """Testing infrastructure for page tests."""
 
 # Stop lint warning about not initializing TestCase parent on
@@ -813,7 +811,6 @@
     test.globs['print_tag_with_id'] = print_tag_with_id
     test.globs['PageTestLayer'] = PageTestLayer
     test.globs['stop'] = stop
-    test.globs['with_statement'] = with_statement
     test.globs['ws_uncache'] = ws_uncache
 
 

=== modified file 'lib/canonical/launchpad/testing/systemdocs.py'
--- lib/canonical/launchpad/testing/systemdocs.py	2010-10-03 15:30:06 +0000
+++ lib/canonical/launchpad/testing/systemdocs.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Infrastructure for setting up doctests."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'default_optionflags',
@@ -211,7 +209,6 @@
     test.globs['verifyObject'] = verifyObject
     test.globs['pretty'] = pprint.PrettyPrinter(width=1).pformat
     test.globs['stop'] = stop
-    test.globs['with_statement'] = with_statement
     test.globs['launchpadlib_for'] = launchpadlib_for
     test.globs['launchpadlib_credentials_for'] = launchpadlib_credentials_for
     test.globs['oauth_access_token_for'] = oauth_access_token_for

=== modified file 'lib/canonical/launchpad/tests/test_helpers.py'
--- lib/canonical/launchpad/tests/test_helpers.py	2010-10-12 01:11:41 +0000
+++ lib/canonical/launchpad/tests/test_helpers.py	2010-10-27 02:13:03 +0000
@@ -9,12 +9,10 @@
 
 from canonical.launchpad import helpers
 from canonical.launchpad.ftests import login
-from canonical.launchpad.interfaces import (
-    ILanguageSet,
-    ILaunchBag,
-    IPerson,
-    )
+from canonical.launchpad.webapp.interfaces import ILaunchBag
 from canonical.testing.layers import LaunchpadFunctionalLayer
+from lp.registry.interfaces.person import IPerson
+from lp.services.worlddata.interfaces.language import ILanguageSet
 from lp.testing.factory import LaunchpadObjectFactory
 from lp.translations.utilities.translation_export import LaunchpadWriteTarFile
 
@@ -45,10 +43,11 @@
             '# Yowza!',
         'uberfrob-0.1/blah/po/la':
             'la la',
-        'uberfrob-0.1/uberfrob.py' :
+        'uberfrob-0.1/uberfrob.py':
             'import sys\n'
-            'print "Frob!"\n'
-    })
+            'print "Frob!"\n',
+        })
+
 
 def make_test_tarball_2():
     r'''
@@ -89,12 +88,14 @@
         'test/es.po': po,
     })
 
+
 def test_join_lines():
     r"""
     >>> helpers.join_lines('foo', 'bar', 'baz')
     'foo\nbar\nbaz\n'
     """
 
+
 def test_shortest():
     """
     >>> helpers.shortest(['xyzzy', 'foo', 'blah'])
@@ -105,6 +106,7 @@
 
 
 class DummyLanguage:
+
     def __init__(self, code, pluralforms):
         self.code = code
         self.pluralforms = pluralforms
@@ -115,10 +117,10 @@
     implements(ILanguageSet)
 
     _languages = {
-        'ja' : DummyLanguage('ja', 1),
-        'es' : DummyLanguage('es', 2),
-        'fr' : DummyLanguage('fr', 3),
-        'cy' : DummyLanguage('cy', None),
+        'ja': DummyLanguage('ja', 1),
+        'es': DummyLanguage('es', 2),
+        'fr': DummyLanguage('fr', 3),
+        'cy': DummyLanguage('cy', None),
         }
 
     def __getitem__(self, key):
@@ -134,15 +136,17 @@
 
         self.languages = [all_languages[code] for code in self.codes]
 
+
 dummyPerson = DummyPerson(('es',))
-
 dummyNoLanguagePerson = DummyPerson(())
 
 
 class DummyResponse:
+
     def redirect(self, url):
         pass
 
+
 class DummyRequest:
     implements(IBrowserRequest)
 
@@ -154,20 +158,24 @@
     def get(self, key, default):
         raise key
 
+
 def adaptRequestToLanguages(request):
     return DummyRequestLanguages()
 
 
 class DummyRequestLanguages:
+
     def getPreferredLanguages(self):
         return [DummyLanguage('ja', 1),
             DummyLanguage('es', 2),
-            DummyLanguage('fr', 3),]
+            DummyLanguage('fr', 3),
+            ]
 
     def getLocalLanguages(self):
         return [DummyLanguage('da', 4),
             DummyLanguage('as', 5),
-            DummyLanguage('sr', 6),]
+            DummyLanguage('sr', 6),
+            ]
 
 
 class DummyLaunchBag:
@@ -191,9 +199,13 @@
 
     >>> setUp()
     >>> ztapi.provideUtility(ILanguageSet, DummyLanguageSet())
-    >>> ztapi.provideUtility(ILaunchBag, DummyLaunchBag('foo.bar@xxxxxxxxxxxxx', dummyPerson))
-    >>> ztapi.provideAdapter(IBrowserRequest, IRequestPreferredLanguages, adaptRequestToLanguages)
-    >>> ztapi.provideAdapter(IBrowserRequest, IRequestLocalLanguages, adaptRequestToLanguages)
+    >>> ztapi.provideUtility(
+    ...     ILaunchBag, DummyLaunchBag('foo.bar@xxxxxxxxxxxxx', dummyPerson))
+    >>> ztapi.provideAdapter(
+    ...     IBrowserRequest, IRequestPreferredLanguages,
+    ...     adaptRequestToLanguages)
+    >>> ztapi.provideAdapter(
+    ...     IBrowserRequest, IRequestLocalLanguages, adaptRequestToLanguages)
 
     >>> languages = preferred_or_request_languages(DummyRequest())
     >>> len(languages)
@@ -207,9 +219,14 @@
 
     >>> setUp()
     >>> ztapi.provideUtility(ILanguageSet, DummyLanguageSet())
-    >>> ztapi.provideUtility(ILaunchBag, DummyLaunchBag('foo.bar@xxxxxxxxxxxxx', dummyNoLanguagePerson))
-    >>> ztapi.provideAdapter(IBrowserRequest, IRequestPreferredLanguages, adaptRequestToLanguages)
-    >>> ztapi.provideAdapter(IBrowserRequest, IRequestLocalLanguages, adaptRequestToLanguages)
+    >>> ztapi.provideUtility(
+    ...     ILaunchBag,
+    ...     DummyLaunchBag('foo.bar@xxxxxxxxxxxxx', dummyNoLanguagePerson))
+    >>> ztapi.provideAdapter(
+    ...     IBrowserRequest, IRequestPreferredLanguages,
+    ...     adaptRequestToLanguages)
+    >>> ztapi.provideAdapter(
+    ...     IBrowserRequest, IRequestLocalLanguages, adaptRequestToLanguages)
 
     >>> languages = preferred_or_request_languages(DummyRequest())
     >>> len(languages)

=== modified file 'lib/canonical/launchpad/tour/bugs'
--- lib/canonical/launchpad/tour/bugs	2010-04-21 09:47:58 +0000
+++ lib/canonical/launchpad/tour/bugs	2010-10-27 02:13:03 +0000
@@ -99,7 +99,7 @@
                         Manage bug reports entirely through your email client.
 <br /><br />
 						You can use email to report, subscribe, comment on, assign, prioritise and make just about any other update to bugs tracked in Launchpad. Launchpad will also mail you with updates on any bug you're interested in, whether an individual bug or all the bugs associated with a particular project or package. Read more about <a href="https://help.launchpad.net/Bugs/EmailInterface";>the bug tracker's e-mail interface &gt;</a><br /><br />
-						And, if you prefer, you can also subscribe to bug information in your feed reader with our Atom feeds. <img src="https://edge.launchpad.net/@@/rss"; alt="Feed logo" />
+						And, if you prefer, you can also subscribe to bug information in your feed reader with our Atom feeds. <img src="https://launchpad.net/@@/rss"; alt="Feed logo" />
                         </p>
                     </div>
                     <div class="block odd">

=== modified file 'lib/canonical/launchpad/webapp/ftests/test_annotations.py'
--- lib/canonical/launchpad/webapp/ftests/test_annotations.py	2010-10-04 19:50:45 +0000
+++ lib/canonical/launchpad/webapp/ftests/test_annotations.py	2010-10-27 02:13:03 +0000
@@ -17,7 +17,7 @@
         connection = db.open()
         root = connection.root()
         handle_before_traversal(root)
-        from canonical.launchpad.interfaces import IZODBAnnotation
+        from canonical.launchpad.interfaces.launchpad import IZODBAnnotation
         from lp.bugs.model.bug import Bug
         from lp.registry.model.product import Product
         bug = Bug.get(1)
@@ -39,6 +39,7 @@
         self.assertEquals(all_annotations['Product']['2']['soyuz.message'],
                           'a message on a product')
 
+
 def test_suite():
     suite = unittest.TestSuite()
     # XXX daniels 2004-12-14:
@@ -47,4 +48,3 @@
     #     tests: the rdb transaction is closed too early.
     ##suite.addTest(unittest.makeSuite(TestAnnotations))
     return suite
-

=== modified file 'lib/canonical/launchpad/webapp/login.py'
--- lib/canonical/launchpad/webapp/login.py	2010-09-03 16:43:11 +0000
+++ lib/canonical/launchpad/webapp/login.py	2010-10-27 02:13:03 +0000
@@ -2,8 +2,6 @@
 # GNU Affero General Public License version 3 (see the file LICENSE).
 """Stuff to do with logging in and logging out."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import (

=== modified file 'lib/canonical/launchpad/webapp/servers.py'
--- lib/canonical/launchpad/webapp/servers.py	2010-10-18 10:19:56 +0000
+++ lib/canonical/launchpad/webapp/servers.py	2010-10-27 02:13:03 +0000
@@ -620,10 +620,6 @@
         """As per zope.publisher.browser.BrowserRequest._createResponse"""
         return LaunchpadBrowserResponse()
 
-    def isRedirectInhibited(self):
-        """Returns True if edge redirection has been inhibited."""
-        return self.cookies.get('inhibit_beta_redirect', '0') == '1'
-
     @cachedproperty
     def form_ng(self):
         """See ILaunchpadBrowserApplicationRequest."""

=== modified file 'lib/canonical/launchpad/webapp/tests/test_authutility.py'
--- lib/canonical/launchpad/webapp/tests/test_authutility.py	2010-10-03 15:30:06 +0000
+++ lib/canonical/launchpad/webapp/tests/test_authutility.py	2010-10-27 02:13:03 +0000
@@ -16,11 +16,8 @@
 from zope.publisher.browser import TestRequest
 from zope.publisher.interfaces.http import IHTTPCredentials
 
-from canonical.launchpad.interfaces import (
-    IAccount,
-    IPasswordEncryptor,
-    IPerson,
-    )
+from canonical.launchpad.interfaces.account import IAccount
+from canonical.launchpad.interfaces.launchpad import IPasswordEncryptor
 from canonical.launchpad.webapp.authentication import (
     LaunchpadPrincipal,
     PlacelessAuthUtility,
@@ -29,6 +26,7 @@
     IPlacelessAuthUtility,
     IPlacelessLoginSource,
     )
+from lp.registry.interfaces.person import IPerson
 
 
 class DummyPerson(object):
@@ -65,6 +63,7 @@
 
 
 class TestPlacelessAuth(PlacelessSetup, unittest.TestCase):
+
     def setUp(self):
         PlacelessSetup.setUp(self)
         ztapi.provideUtility(IPasswordEncryptor, DummyPasswordEncryptor())

=== modified file 'lib/canonical/launchpad/webapp/tests/test_dbpolicy.py'
--- lib/canonical/launchpad/webapp/tests/test_dbpolicy.py	2010-10-03 15:30:06 +0000
+++ lib/canonical/launchpad/webapp/tests/test_dbpolicy.py	2010-10-27 02:13:03 +0000
@@ -23,7 +23,7 @@
     ISession,
     )
 
-from canonical.launchpad.interfaces import (
+from canonical.launchpad.interfaces.lpstorm import (
     IMasterStore,
     ISlaveStore,
     )
@@ -182,7 +182,7 @@
 
     def test_FeedsLayer_uses_SlaveDatabasePolicy(self):
         """FeedsRequest should use the SlaveDatabasePolicy since they
-        are read-only in nature. Also we don't want to send session cookies 
+        are read-only in nature. Also we don't want to send session cookies
         over them.
         """
         request = LaunchpadTestRequest(

=== modified file 'lib/canonical/launchpad/webapp/tests/test_encryptor.py'
--- lib/canonical/launchpad/webapp/tests/test_encryptor.py	2010-10-03 15:30:06 +0000
+++ lib/canonical/launchpad/webapp/tests/test_encryptor.py	2010-10-27 02:13:03 +0000
@@ -12,7 +12,7 @@
 from zope.app.testing.placelesssetup import PlacelessSetup
 from zope.component import getUtility
 
-from canonical.launchpad.interfaces import IPasswordEncryptor
+from canonical.launchpad.interfaces.launchpad import IPasswordEncryptor
 from canonical.launchpad.webapp.authentication import SSHADigestEncryptor
 
 
@@ -30,8 +30,7 @@
         self.failIfEqual(encrypted1, encrypted2)
         salt = encrypted1[20:]
         v = binascii.b2a_base64(
-            hashlib.sha1('motorhead' + salt).digest() + salt
-            )[:-1]
+            hashlib.sha1('motorhead' + salt).digest() + salt)[:-1]
         return (v == encrypted1)
 
     def test_validate(self):
@@ -67,6 +66,3 @@
 def test_suite():
     t = unittest.makeSuite(TestSSHADigestEncryptor)
     return unittest.TestSuite((t,))
-
-if __name__=='__main__':
-    main(defaultTest='test_suite')

=== modified file 'lib/canonical/launchpad/webapp/tests/test_errorlog.py'
--- lib/canonical/launchpad/webapp/tests/test_errorlog.py	2010-10-13 16:44:44 +0000
+++ lib/canonical/launchpad/webapp/tests/test_errorlog.py	2010-10-27 02:13:03 +0000
@@ -3,7 +3,6 @@
 
 """Tests for error logging & OOPS reporting."""
 
-from __future__ import with_statement
 __metaclass__ = type
 
 import datetime

=== modified file 'lib/canonical/launchpad/webapp/tests/test_login.py'
--- lib/canonical/launchpad/webapp/tests/test_login.py	2010-09-03 11:10:09 +0000
+++ lib/canonical/launchpad/webapp/tests/test_login.py	2010-10-27 02:13:03 +0000
@@ -1,6 +1,4 @@
 # Copyright 2009-2010 Canonical Ltd.  All rights reserved.
-from __future__ import with_statement
-
 # pylint: disable-msg=W0105
 """Test harness for running the new-login.txt tests."""
 

=== modified file 'lib/canonical/launchpad/webapp/tests/test_loginsource.py'
--- lib/canonical/launchpad/webapp/tests/test_loginsource.py	2010-10-04 19:50:45 +0000
+++ lib/canonical/launchpad/webapp/tests/test_loginsource.py	2010-10-27 02:13:03 +0000
@@ -9,10 +9,10 @@
     ANONYMOUS,
     login,
     )
-from canonical.launchpad.interfaces import IPersonSet
 from canonical.launchpad.webapp.authentication import IPlacelessLoginSource
 from canonical.launchpad.webapp.interfaces import AccessLevel
 from canonical.testing.layers import DatabaseFunctionalLayer
+from lp.registry.interfaces.person import IPersonSet
 
 
 class LaunchpadLoginSourceTest(unittest.TestCase):

=== modified file 'lib/canonical/launchpad/webapp/tests/test_servers.py'
--- lib/canonical/launchpad/webapp/tests/test_servers.py	2010-09-28 07:00:56 +0000
+++ lib/canonical/launchpad/webapp/tests/test_servers.py	2010-10-27 02:13:03 +0000
@@ -485,19 +485,6 @@
             "The query_string_params dict correctly interprets encoded "
             "parameters.")
 
-    def test_isRedirectInhibited_without_cookie(self):
-        # When the request doesn't include the inhibit_beta_redirect cookie,
-        # isRedirectInhibited() returns False.
-        request = LaunchpadBrowserRequest('', {})
-        self.assertFalse(request.isRedirectInhibited())
-
-    def test_isRedirectInhibited_with_cookie(self):
-        # When the request includes the inhibit_beta_redirect cookie,
-        # isRedirectInhibited() returns True.
-        request = LaunchpadBrowserRequest(
-            '', dict(HTTP_COOKIE="inhibit_beta_redirect=1"))
-        self.assertTrue(request.isRedirectInhibited())
-
 
 def test_suite():
     suite = unittest.TestSuite()

=== modified file 'lib/canonical/launchpad/windmill/tests/test_widgets.py'
--- lib/canonical/launchpad/windmill/tests/test_widgets.py	2010-08-20 20:31:18 +0000
+++ lib/canonical/launchpad/windmill/tests/test_widgets.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for the Windmill test doubles themselves."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from mocker import (

=== modified file 'lib/canonical/launchpad/xmlrpc/tests/test_authserver.py'
--- lib/canonical/launchpad/xmlrpc/tests/test_authserver.py	2010-10-03 15:30:06 +0000
+++ lib/canonical/launchpad/xmlrpc/tests/test_authserver.py	2010-10-27 02:13:03 +0000
@@ -10,7 +10,7 @@
 from zope.component import getUtility
 from zope.publisher.xmlrpc import TestRequest
 
-from canonical.launchpad.interfaces import IPrivateApplication
+from canonical.launchpad.interfaces.launchpad import IPrivateApplication
 from canonical.launchpad.xmlrpc import faults
 from canonical.launchpad.xmlrpc.authserver import AuthServerAPIView
 from canonical.testing.layers import DatabaseFunctionalLayer

=== modified file 'lib/canonical/librarian/ftests/test_web.py'
--- lib/canonical/librarian/ftests/test_web.py	2010-10-04 19:50:45 +0000
+++ lib/canonical/librarian/ftests/test_web.py	2010-10-27 02:13:03 +0000
@@ -5,7 +5,10 @@
 from datetime import datetime
 import httplib
 import unittest
-from urllib2 import urlopen, HTTPError
+from urllib2 import (
+    HTTPError,
+    urlopen,
+    )
 from urlparse import urlparse
 
 import pytz
@@ -19,15 +22,15 @@
     flush_database_updates,
     session_store,
     )
+from canonical.launchpad.database import LibraryFileAlias
 from canonical.launchpad.database.librarian import TimeLimitedToken
+from canonical.launchpad.interfaces.librarian import ILibraryFileAliasSet
+from canonical.launchpad.interfaces.lpstorm import IMasterStore
 from canonical.librarian.client import (
     get_libraryfilealias_download_path,
     LibrarianClient,
     )
 from canonical.librarian.interfaces import DownloadFailed
-from canonical.launchpad.database import LibraryFileAlias
-from canonical.launchpad.interfaces import IMasterStore
-from canonical.launchpad.interfaces.librarian import ILibraryFileAliasSet
 from canonical.testing.layers import (
     LaunchpadFunctionalLayer,
     LaunchpadZopelessLayer,

=== modified file 'lib/canonical/librarian/testing/server.py'
--- lib/canonical/librarian/testing/server.py	2010-09-27 02:08:32 +0000
+++ lib/canonical/librarian/testing/server.py	2010-10-27 02:13:03 +0000
@@ -1,4 +1,4 @@
-# Copyright 2009 Canonical Ltd.  This software is licensed under the
+# Copyright 2009-2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
 """Fixture for the librarians."""
@@ -11,9 +11,9 @@
     'LibrarianTestSetup',
     ]
 
+import atexit
 import os
 import shutil
-import tempfile
 import warnings
 
 from fixtures import Fixture
@@ -24,7 +24,6 @@
     get_pid_from_file,
     TacException,
     TacTestSetup,
-    two_stage_kill,
     )
 from canonical.librarian.storage import _relFileLocation
 

=== modified file 'lib/canonical/librarian/web.py'
--- lib/canonical/librarian/web.py	2010-09-24 15:40:49 +0000
+++ lib/canonical/librarian/web.py	2010-10-27 02:13:03 +0000
@@ -7,6 +7,7 @@
 import time
 from urlparse import urlparse
 
+from twisted.python import log
 from twisted.web import resource, static, util, server, proxy
 from twisted.internet.threads import deferToThread
 
@@ -52,6 +53,8 @@
         try:
             aliasID = int(name)
         except ValueError:
+            log.msg(
+                "404: alias is not an int: %r" % (name,))
             return fourOhFour
 
         return LibraryFileAliasResource(self.storage, aliasID,
@@ -76,6 +79,8 @@
             try:
                 self.aliasID = int(filename)
             except ValueError:
+                log.msg(
+                    "404 (old URL): alias is not an int: %r" % (name,))
                 return fourOhFour
             filename = request.postpath[0]
 
@@ -95,6 +100,9 @@
                 netloc = netloc[:netloc.find(':')]
             expected_hostname = 'i%d.restricted.%s' % (self.aliasID, netloc)
             if expected_hostname != hostname:
+                log.msg(
+                    '404: expected_hostname != hostname: %r != %r' %
+                    (expected_hostname, hostname))
                 return fourOhFour
 
         token = request.args.get('token', [None])[0]
@@ -128,6 +136,9 @@
         # a crude form of access control (stuff we care about can have
         # unguessable names effectively using the filename as a secret).
         if dbfilename.encode('utf-8') != filename:
+            log.msg(
+                "404: dbfilename.encode('utf-8') != filename: %r != %r"
+                % (dbfilename.encode('utf-8'), filename))
             return fourOhFour
 
         if not restricted:

=== modified file 'lib/canonical/testing/layers.py'
--- lib/canonical/testing/layers.py	2010-10-22 09:49:44 +0000
+++ lib/canonical/testing/layers.py	2010-10-27 02:13:03 +0000
@@ -623,6 +623,8 @@
 
     _is_setup = False
 
+    _atexit_call = None
+
     @classmethod
     @profiled
     def setUp(cls):
@@ -635,7 +637,7 @@
         the_librarian = LibrarianTestSetup()
         the_librarian.setUp()
         LibrarianLayer._check_and_reset()
-        atexit.register(the_librarian.tearDown)
+        cls._atexit_call = atexit.register(the_librarian.tearDown)
 
     @classmethod
     @profiled
@@ -650,6 +652,11 @@
                     )
         LibrarianLayer._check_and_reset()
         LibrarianTestSetup().tearDown()
+        # Remove the atexit handler, since we've already done the work.
+        atexit._exithandlers = [
+            handler for handler in atexit._exithandlers
+            if handler[0] != cls._atexit_call]
+        cls._atexit_call = None
 
     @classmethod
     @profiled

=== modified file 'lib/canonical/testing/parallel.py'
--- lib/canonical/testing/parallel.py	2010-10-16 18:20:17 +0000
+++ lib/canonical/testing/parallel.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 """Parallel test glue."""
 
 __metaclass__ = type

=== modified file 'lib/canonical/testing/tests/test_parallel.py'
--- lib/canonical/testing/tests/test_parallel.py	2010-10-16 18:20:17 +0000
+++ lib/canonical/testing/tests/test_parallel.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 """Parallel test glue."""
 
 __metaclass__ = type

=== modified file 'lib/devscripts/autoland.py'
--- lib/devscripts/autoland.py	2010-09-02 20:25:02 +0000
+++ lib/devscripts/autoland.py	2010-10-27 02:13:03 +0000
@@ -28,7 +28,7 @@
         self._launchpad = launchpad
 
     @classmethod
-    def load(cls, service_root='edge'):
+    def load(cls, service_root='production'):
         # XXX: JonathanLange 2009-09-24: No unit tests.
         # XXX: JonathanLange 2009-09-24 bug=435813: If cached data invalid,
         # there's no easy way to delete it and try again.

=== modified file 'lib/devscripts/ec2test/entrypoint.py'
--- lib/devscripts/ec2test/entrypoint.py	2010-08-26 04:33:36 +0000
+++ lib/devscripts/ec2test/entrypoint.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """The entry point for the 'ec2' utility."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'main',

=== modified file 'lib/devscripts/ec2test/remote.py'
--- lib/devscripts/ec2test/remote.py	2010-09-20 22:44:49 +0000
+++ lib/devscripts/ec2test/remote.py	2010-10-27 02:13:03 +0000
@@ -16,8 +16,6 @@
    the responsibility of handling the results that `LaunchpadTester` gathers.
 """
 
-from __future__ import with_statement
-
 __metatype__ = type
 
 import datetime

=== modified file 'lib/lp/answers/browser/tests/test_questiontarget.py'
--- lib/lp/answers/browser/tests/test_questiontarget.py	2010-10-04 19:50:45 +0000
+++ lib/lp/answers/browser/tests/test_questiontarget.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Test questiontarget views."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import os

=== modified file 'lib/lp/app/browser/tests/base-layout.txt'
--- lib/lp/app/browser/tests/base-layout.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/app/browser/tests/base-layout.txt	2010-10-27 02:13:03 +0000
@@ -172,6 +172,7 @@
     Terms of use http://launchpad.dev/legal
     Contact Launchpad Support /feedback
     System status http://identi.ca/launchpadstatus
+    get the code https://code.launchpad.net/~launchpad-pqm/launchpad/stable/
 
 
 Page registering

=== removed file 'lib/lp/app/stories/basics/xx-beta-testers-redirection.txt'
--- lib/lp/app/stories/basics/xx-beta-testers-redirection.txt	2010-10-17 15:44:08 +0000
+++ lib/lp/app/stories/basics/xx-beta-testers-redirection.txt	1970-01-01 00:00:00 +0000
@@ -1,256 +0,0 @@
-= Redirection of Beta Testers =
-
-Launchpad occasionally runs private beta tests of large
-changes. During these times, members of the launchpad-beta-testers
-team have access to a separate web app running off the same database.
-
-To encourage members of this team to use the beta UI, we automatically
-redirect them to the beta site when they use the main site.
-
-For these tests, we will use the user 'launchpad-beta-owner', who is
-on the beta testers team.  First, create a browser object for this
-person:
-
-    >>> beta_browser = setupBrowser(auth='Basic beta-admin@xxxxxxxxxxxxx:test')
-
-# XXX jamesh 2007-01-31 bug=98482:
-# zope.testbrowser does not handle redirects to remote sites, so we
-# disable the redirection handling at the mechanize layer.  We then need
-# to check the HTTPError directly.
-
-    >>> user_browser.mech_browser.set_handle_redirect(False)
-    >>> beta_browser.mech_browser.set_handle_redirect(False)
-    >>> from urllib2 import HTTPError
-    >>> def check(function, *args, **kwargs):
-    ...     try:
-    ...         function(*args, **kwargs)
-    ...     except HTTPError, exc:
-    ...         print str(exc)
-    ...         location = exc.hdrs.getheader('Location')
-    ...         if location is not None:
-    ...             print 'Location:', location
-
-The redirection is controlled by the beta_testers_redirection_host
-config item.  If it is set to None, no redirection occurs:
-
-    >>> from canonical.config import config
-
-    >>> print config.launchpad.beta_testers_redirection_host
-    None
-    >>> check(beta_browser.open, 'http://launchpad.dev/ubuntu')
-    >>> print beta_browser.url
-    http://launchpad.dev/ubuntu
-
-Normally, we run the development system (launchpad.dev) with no beta
-redirection host. For the purposes of this test, however, we will set it to
-beta.launchpad.dev:
-
-    >>> beta_data = """
-    ...     [launchpad]
-    ...     beta_testers_redirection_host = beta.launchpad.dev
-    ...     """
-    >>> config.push('beta_data', beta_data)
-
-If a normal user goes to a page on the site, it will load as normal:
-
-    >>> check(user_browser.open, 'http://launchpad.dev/ubuntu')
-    >>> print user_browser.url
-    http://launchpad.dev/ubuntu
-
-In contrast, members of the beta testers get redirected:
-
-    >>> check(beta_browser.open, 'http://launchpad.dev/ubuntu')
-    HTTP Error 303: See Other
-    Location: http://beta.launchpad.dev/ubuntu
-
-The redirection also works for URLs below the root, and with query
-parameters:
-
-    >>> check(beta_browser.open,
-    ...     'http://launchpad.dev/ubuntu/+search?text=foo')
-    HTTP Error 303: See Other
-    Location: http://beta.launchpad.dev/ubuntu/+search?text=foo
-
-However, HTTP POST requests to the normal site are not redirected:
-
-    >>> from urllib import urlencode
-    >>> check(beta_browser.open,
-    ...       'http://launchpad.dev/ubuntu/+search',
-    ...       data=urlencode({'text': 'foo'}))
-    >>> print beta_browser.url
-    http://launchpad.dev/ubuntu/+search
-
-The redirection works for other Launchpad subdomains:
-
-    >>> check(beta_browser.open,
-    ...     'http://bugs.launchpad.dev/launchpad/+bugs?orderby=-datecreated')
-    HTTP Error 303: See Other
-    Location: http://bugs.beta.launchpad.dev/launchpad/+bugs?orderby=-datecreated
-
-    >>> check(beta_browser.open, 'http://answers.launchpad.dev/~name12')
-    HTTP Error 303: See Other
-    Location: http://answers.beta.launchpad.dev/~name12
-
-However, domains not under the main site will not be redirected:
-
-    # Go behind the curtains and change the hostname of our mainsite so that
-    # we can test this.
-    >>> config.push('mainsite_data', """
-    ...     [vhost.mainsite]
-    ...     hostname: foo.dev
-    ...     """)
-
-    >>> check(beta_browser.open, 'http://launchpad.dev/~name12')
-    >>> print beta_browser.url
-    http://launchpad.dev/~name12
-
-    # Now restore our mainsite's hostname.
-    >>> dummy = config.pop('mainsite_data')
-
-The front page of Launchpad does not redirect.
-
-    >>> check(beta_browser.open, 'http://launchpad.dev/')
-    >>> print beta_browser.url
-    http://launchpad.dev/
-
-On the beta site, a client side JS is available on every page (except
-the home page) in the footer that sets a cookie to inhibit
-the redirection.
-
-    # Configure the site as the beta site.
-    >>> config.push('fake_beta_site', """
-    ...     [launchpad]
-    ...     beta_testers_redirection_host = none
-    ...     is_edge: True
-    ...     site_message: This is the beta site
-    ...     """)
-
-    >>> check(beta_browser.open, 'http://launchpad.dev/ubuntu')
-    >>> print beta_browser.url
-    http://launchpad.dev/ubuntu
-    >>> print find_tags_by_class(beta_browser.contents, 'sitemessage')[0]
-    <div class="sitemessage">
-        This is the beta site
-        <a href="#" class="js-action" onclick="setBetaRedirect(false)">
-          Disable edge redirect.
-        </a>
-    </div>
-
-    >>> dummy = config.pop('fake_beta_site')
-
-    # Workaround bug in mechanize where you cannot use the Cookie
-    # header with the CookieJar
-    >>> from mechanize._clientcookie import Cookie
-    >>> cookiejar = (
-    ...     beta_browser.mech_browser._ua_handlers['_cookies'].cookiejar)
-    >>> cookiejar.set_cookie(
-    ...     Cookie(
-    ...         version=0, name='inhibit_beta_redirect', value='1', port=None,
-    ...         port_specified=False, domain='.launchpad.dev',
-    ...         domain_specified=True, domain_initial_dot=True, path='/',
-    ...         path_specified=True, secure=False, expires=None,
-    ...         discard=None, comment=None, comment_url=None, rest={}))
-
-Now when they go to a page on the site, it loads as normal:
-
-    >>> check(beta_browser.open, 'http://launchpad.dev/ubuntu')
-    >>> print beta_browser.url
-    http://launchpad.dev/ubuntu
-
-
-== Shortcut redirection for bugs ==
-
-A bug URL, such as http://launchpad.dev/bugs/12, is commonly hit
-directly and then, eventually, redirected to a bug task such as
-https://bugs.launchpad.dev/jokosher/+bug/12.  These bug URLs can be
-redirected with fewer steps than the four it would normally take.
-
-The normal user gets redirected to the bug task page.
-
-    >>> check(user_browser.open, 'http://launchpad.dev/bugs/12')
-    HTTP Error 303: See Other
-    Location: http://bugs.launchpad.dev/jokosher/+bug/12
-
-An anonymous user gets redirected to the bug task page.
-
-    >>> anon_browser.mech_browser.set_handle_redirect(False)
-    >>> check(anon_browser.open, 'http://launchpad.dev/bugs/12')
-    HTTP Error 303: See Other
-    Location: http://bugs.launchpad.dev/jokosher/+bug/12
-
-The beta user gets redirected to the bug task page, but on the beta
-host.
-
-    >>> # Create a new beta browser that doesn't have redirection
-    >>> # override cookies.
-    >>> beta_browser = setupBrowser(auth='Basic beta-admin@xxxxxxxxxxxxx:test')
-    >>> beta_browser.mech_browser.set_handle_redirect(False)
-    >>> check(beta_browser.open, 'http://launchpad.dev/bugs/12')
-    HTTP Error 303: See Other
-    Location: http://bugs.beta.launchpad.dev/jokosher/+bug/12
-
-Restore the config to state it was in at the start of the test.
-
-    >>> config_data = config.pop('beta_data')
-
-
-== Redirection notice in OOPS timeouts ==
-
-The beta site has timeout values lower than lpnet, so beta testers are more
-likely to experience timeouts. When a page times out for beta testers the
-notification to disable beta redirection is displayed.
-
-Add Foo Bar to the beta team.
-
-    >>> from zope.component import getUtility
-    >>> from canonical.launchpad.interfaces.launchpad import ILaunchpadCelebrities
-    >>> from lp.registry.interfaces.person import IPersonSet
-    >>> from canonical.launchpad.ftests import login, logout
-    >>> login('foo.bar@xxxxxxxxxxxxx')
-    >>> foo_bar = getUtility(IPersonSet).getByName('name16')
-    >>> launchpad_beta_testers = getUtility(
-    ...     ILaunchpadCelebrities).launchpad_beta_testers
-    >>> ignored = launchpad_beta_testers.addMember(foo_bar, reviewer=foo_bar)
-    >>> foo_bar.inTeam(launchpad_beta_testers)
-    True
-
-    >>> logout()
-
-Decrease the timeout values for launchpad.dev and pretend we're on the edge
-server.
-
-    >>> beta_data = """
-    ...     [launchpad]
-    ...     is_edge: True
-    ...     [database]
-    ...     db_statement_timeout: 1
-    ...     soft_request_timeout: 2
-    ...     """
-    >>> config.push('beta_data', beta_data)
-
-Check that the notification is in the timeout page
-
-    >>> print http(r"""
-    ... GET /+soft-timeout HTTP/1.1
-    ... Authorization: Basic Zm9vLmJhckBjYW5vbmljYWwuY29tOnRlc3Q=
-    ... """)
-    HTTP/1.1 503 Service Unavailable
-    ...
-    Retry-After: 900
-    ...
-    <title>Error: Timeout</title>
-    ...
-    <h1 class="exception">Timeout error</h1>
-    ...
-    <p>Our edge server has a lower timeout threshold than launchpad.net,
-    so we can catch those before they hit a wider audience.
-    If this is blocking your work and you are a member of the Launchpad
-    Beta Testers team, you can disable automatic redirection
-    to edge in order to use launchpad.net.</p>
-    <p><button onclick="setBetaRedirect(false)">Disable redirection
-    for 2 hours</button></p>
-    ...
-
-Restore the config to state it was in at the start of the test.
-
-    >>> config_data = config.pop('beta_data')

=== modified file 'lib/lp/app/templates/base-layout-macros.pt'
--- lib/lp/app/templates/base-layout-macros.pt	2010-10-04 12:23:40 +0000
+++ lib/lp/app/templates/base-layout-macros.pt	2010-10-27 02:13:03 +0000
@@ -424,15 +424,13 @@
       &nbsp;&bull;&nbsp;
       <a href="http://identi.ca/launchpadstatus";
         >System status</a>
-      <span id="lp-version" tal:condition="not:is_lpnet">
+      <span id="lp-version">
       &nbsp;&bull;&nbsp;
         r<tal:revno replace="revno" />
         <tal:devmode condition="devmode">devmode</tal:devmode>
         <tal:demo condition="is_demo">demo site</tal:demo>
-        <tal:edge condition="is_edge">
-          beta site (<a href="https://code.edge.launchpad.net/~launchpad-pqm/launchpad/stable/";
+        (<a href="https://code.launchpad.net/~launchpad-pqm/launchpad/stable/";
              >get the code</a>)
-        </tal:edge>
       </span>
     </div>
   </div>
@@ -443,16 +441,6 @@
     <tal:site_message tal:content="structure site_message">
       This site is running pre-release code.
     </tal:site_message>
-    <tal:edge_only condition="is_edge">
-      <a href="#" class="js-action" onclick="setBetaRedirect(false)"
-         tal:condition="not:request/isRedirectInhibited">
-        Disable edge redirect.
-      </a>
-      <a href="#" class="js-action" onclick="setBetaRedirect(true)"
-         tal:condition="request/isRedirectInhibited">
-        Enable edge redirect.
-      </a>
-    </tal:edge_only>
   </div>
 </metal:site-message>
 

=== modified file 'lib/lp/app/templates/base-layout.pt'
--- lib/lp/app/templates/base-layout.pt	2010-08-19 16:04:43 +0000
+++ lib/lp/app/templates/base-layout.pt	2010-10-27 02:13:03 +0000
@@ -9,7 +9,6 @@
     devmode modules/canonical.config/config/devmode;
     rooturl modules/canonical.launchpad.webapp.vhosts/allvhosts/configs/mainsite/rooturl;
     is_demo modules/canonical.config/config/launchpad/is_demo;
-    is_edge modules/canonical.config/config/launchpad/is_edge;
     is_lpnet modules/canonical.config/config/launchpad/is_lpnet;
     site_message modules/canonical.config/config/launchpad/site_message;
     icingroot string:${rooturl}+icing/rev${revno};
@@ -72,7 +71,7 @@
       ${view/context/fmt:public-private-css}
       yui-skin-sam">
         <script type="text/javascript"
-          tal:condition="python: is_edge or is_lpnet">
+          tal:condition="python: is_lpnet">
           var _gaq = _gaq || [];
           _gaq.push(['_setAccount', 'UA-12833497-1']);
           _gaq.push(['_setDomainName', '.launchpad.net']);

=== modified file 'lib/lp/archivepublisher/tests/test_config.py'
--- lib/lp/archivepublisher/tests/test_config.py	2010-10-04 19:50:45 +0000
+++ lib/lp/archivepublisher/tests/test_config.py	2010-10-27 02:13:03 +0000
@@ -8,12 +8,12 @@
 from zope.component import getUtility
 
 from canonical.config import config
-from canonical.launchpad.interfaces import IDistributionSet
 from canonical.testing.layers import LaunchpadZopelessLayer
 from lp.archivepublisher.config import (
     Config,
     LucilleConfigError,
     )
+from lp.registry.interfaces.distribution import IDistributionSet
 from lp.testing import TestCaseWithFactory
 
 

=== modified file 'lib/lp/archivepublisher/tests/test_generate_ppa_htaccess.py'
--- lib/lp/archivepublisher/tests/test_generate_ppa_htaccess.py	2010-10-03 15:30:06 +0000
+++ lib/lp/archivepublisher/tests/test_generate_ppa_htaccess.py	2010-10-27 02:13:03 +0000
@@ -18,17 +18,15 @@
 from zope.security.proxy import removeSecurityProxy
 
 from canonical.config import config
-from canonical.launchpad.interfaces import (
-    IDistributionSet,
-    IPersonSet,
-    TeamMembershipStatus,
-    )
 from canonical.launchpad.scripts import QuietFakeLogger
 from canonical.testing.layers import LaunchpadZopelessLayer
 from lp.archivepublisher.config import getPubConfig
 from lp.archivepublisher.scripts.generate_ppa_htaccess import (
     HtaccessTokenGenerator,
     )
+from lp.registry.interfaces.distribution import IDistributionSet
+from lp.registry.interfaces.person import IPersonSet
+from lp.registry.interfaces.teammembership import TeamMembershipStatus
 from lp.services.mail import stub
 from lp.services.scripts.interfaces.scriptactivity import IScriptActivitySet
 from lp.soyuz.enums import (

=== modified file 'lib/lp/archivepublisher/tests/util.py'
--- lib/lp/archivepublisher/tests/util.py	2010-10-03 15:30:06 +0000
+++ lib/lp/archivepublisher/tests/util.py	2010-10-27 02:13:03 +0000
@@ -7,14 +7,16 @@
 
 # Utility functions/classes for testing the archive publisher.
 
-from canonical.launchpad.interfaces import SeriesStatus
 from lp.archivepublisher.tests import datadir
 from lp.registry.interfaces.pocket import PackagePublishingPocket
+from lp.registry.interfaces.series import SeriesStatus
 
 
 __all__ = ['FakeLogger']
 
+
 class FakeLogger:
+
     def debug(self, *args, **kwargs):
         pass
 
@@ -23,6 +25,7 @@
 
 
 class FakeDistribution:
+
     def __init__(self, name, conf):
         self.name = name.decode('utf-8')
         self.lucilleconfig = conf.decode('utf-8')
@@ -39,6 +42,7 @@
 
 
 class FakeDistroSeries:
+
     def __init__(self, name, conf, distro):
         self.name = name.decode('utf-8')
         self.lucilleconfig = conf.decode('utf-8')
@@ -50,12 +54,14 @@
 
 
 class FakeDistroArchSeries:
+
     def __init__(self, series, archtag):
         self.distroseries = series
         self.architecturetag = archtag
 
 
 class FakeSource:
+
     def __init__(self, version, status, name=""):
         self.version = version.decode('utf-8')
         self.status = status
@@ -66,11 +72,11 @@
         return FakeSource(
             self.version.encode('utf-8'),
             self.status,
-            self.sourcepackagename.encode('utf-8')
-            )
+            self.sourcepackagename.encode('utf-8'))
 
 
 class FakeBinary:
+
     def __init__(self, version, status, name=""):
         self.version = version.decode('utf-8')
         self.status = status
@@ -81,8 +87,7 @@
         return FakeBinary(
             self.version.encode('utf-8'),
             self.status,
-            self.packagename.encode('utf-8')
-            )
+            self.packagename.encode('utf-8'))
 
 
 class FakeSourcePublishing:
@@ -90,7 +95,10 @@
     id = 1
 
     def __init__(self, source, component, alias, section, ds):
-        class Dummy: id = 1
+
+        class Dummy:
+            id = 1
+
         self.sourcepackagerelease = Dummy()
         self.sourcepackagerelease.name = source
         self.component = Dummy()
@@ -111,13 +119,17 @@
             self.distroseries.name,
             )
 
+
 class FakeBinaryPublishing:
     """Mocks a BinaryPackagePublishingHistory object."""
     id = 1
 
     def __init__(self, binary, source, component, alias,
                  section, ds, prio, archtag):
-        class Dummy: id = 1
+
+        class Dummy:
+            id = 1
+
         self.binarypackagerelease = Dummy()
         self.binarypackagerelease.name = source
         self.sourcepackagerelease = Dummy()
@@ -151,6 +163,7 @@
 
 class FakeSourceFilePublishing:
     """Mocks a SourcePackageFilePublishing object."""
+
     def __init__(self, source, component, leafname, alias, section, ds):
         self.sourcepackagename = source
         self.componentname = component
@@ -170,9 +183,12 @@
             self.distroseriesname,
             )
 
+
 class FakeBinaryFilePublishing:
     """Mocks a BinaryPackageFilePublishing object."""
-    def __init__(self, source, component, leafname, alias, section, ds, archtag):
+
+    def __init__(self, source, component, leafname, alias, section,
+                 ds, archtag):
         self.sourcepackagename = source
         self.componentname = component
         self.libraryfilealiasfilename = leafname
@@ -212,7 +228,7 @@
         for val in thing:
             ret.append(_deepCopy(val))
         return tuple(ret)
-    if getattr(thing,"_deepCopy",sentinel) != sentinel:
+    if getattr(thing, "_deepCopy", sentinel) != sentinel:
         return thing._deepCopy()
     return thing # Assume we can't copy it deeply
 
@@ -229,7 +245,8 @@
 overrideroot=FOO/overrides
 cacheroot=FOO/cache
 miscroot=FOO/misc
-                        """.replace("FOO",datadir("distro")).replace("BAR","ubuntu"));
+                        """.replace(
+                        "FOO", datadir("distro")).replace("BAR", "ubuntu"))
 
 fake_ubuntu_series = [
     FakeDistroSeries("warty",
@@ -241,6 +258,4 @@
                       """
 [publishing]
 components = main restricted universe
-                      """, fake_ubuntu)
-    ]
-
+                      """, fake_ubuntu)]

=== modified file 'lib/lp/archiveuploader/tests/__init__.py'
--- lib/lp/archiveuploader/tests/__init__.py	2010-10-06 11:46:51 +0000
+++ lib/lp/archiveuploader/tests/__init__.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for the archive uploader."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 __all__ = [

=== modified file 'lib/lp/archiveuploader/tests/test_ppauploadprocessor.py'
--- lib/lp/archiveuploader/tests/test_ppauploadprocessor.py	2010-10-22 04:12:39 +0000
+++ lib/lp/archiveuploader/tests/test_ppauploadprocessor.py	2010-10-27 02:13:03 +0000
@@ -19,10 +19,8 @@
 
 from canonical.config import config
 from canonical.launchpad.database import Component
-from canonical.launchpad.interfaces import (
-    ILaunchpadCelebrities,
-    ILibraryFileAliasSet,
-    )
+from canonical.launchpad.interfaces.launchpad import ILaunchpadCelebrities
+from canonical.launchpad.interfaces.librarian import ILibraryFileAliasSet
 from canonical.launchpad.testing.fakepackager import FakePackager
 from lp.app.errors import NotFoundError
 from lp.archiveuploader.tests.test_uploadprocessor import (
@@ -38,12 +36,8 @@
     PackageUploadStatus,
     SourcePackageFormat,
     )
-from lp.soyuz.interfaces.archive import (
-    IArchiveSet,
-    )
-from lp.soyuz.interfaces.queue import (
-    NonBuildableSourceUploadError,
-    )
+from lp.soyuz.interfaces.archive import IArchiveSet
+from lp.soyuz.interfaces.queue import NonBuildableSourceUploadError
 from lp.soyuz.interfaces.sourcepackageformat import (
     ISourcePackageFormatSelectionSet,
     )
@@ -490,7 +484,7 @@
         Anyone listed as an uploader in ArchivePermissions will automatically
         get an upload notification email.
 
-        See https://bugs.edge.launchpad.net/soyuz/+bug/397077
+        See https://bugs.launchpad.net/soyuz/+bug/397077
         """
         # Create the extra permissions. We're making an extra team and
         # adding it to cprov's upload permission, plus name12.
@@ -1046,7 +1040,7 @@
 
         Some error messages can contain the PPA display name, which may
         sometimes contain unicode characters.  There was a bug
-        https://bugs.edge.launchpad.net/bugs/275509 reported about getting
+        https://bugs.launchpad.net/bugs/275509 reported about getting
         upload errors related to unicode.  This only happened when the
         uploader was attaching a .orig.tar.gz file with different contents
         than the one already in the PPA.

=== modified file 'lib/lp/archiveuploader/tests/test_uploadprocessor.py'
--- lib/lp/archiveuploader/tests/test_uploadprocessor.py	2010-10-18 13:55:07 +0000
+++ lib/lp/archiveuploader/tests/test_uploadprocessor.py	2010-10-27 02:13:03 +0000
@@ -28,11 +28,10 @@
 from canonical.config import config
 from canonical.database.constants import UTC_NOW
 from canonical.launchpad.ftests import import_public_test_keys
-from canonical.launchpad.interfaces import ILibraryFileAliasSet
+from canonical.launchpad.interfaces.librarian import ILibraryFileAliasSet
 from canonical.launchpad.testing.fakepackager import FakePackager
 from canonical.launchpad.webapp.errorlog import ErrorReportingUtility
 from canonical.testing.layers import LaunchpadZopelessLayer
-
 from lp.app.errors import NotFoundError
 from lp.archiveuploader.uploadpolicy import (
     AbstractUploadPolicy,
@@ -61,21 +60,15 @@
     PackageUploadStatus,
     SourcePackageFormat,
     )
-from lp.soyuz.interfaces.archive import (
-    IArchiveSet,
-    )
-from lp.soyuz.interfaces.archivepermission import (
-    IArchivePermissionSet,
-    )
+from lp.soyuz.interfaces.archive import IArchiveSet
+from lp.soyuz.interfaces.archivepermission import IArchivePermissionSet
 from lp.soyuz.interfaces.component import IComponentSet
 from lp.soyuz.interfaces.packageset import IPackagesetSet
 from lp.soyuz.interfaces.publishing import (
     IPublishingSet,
     PackagePublishingStatus,
     )
-from lp.soyuz.interfaces.queue import (
-    QueueInconsistentStateError,
-    )
+from lp.soyuz.interfaces.queue import QueueInconsistentStateError
 from lp.soyuz.interfaces.sourcepackageformat import (
     ISourcePackageFormatSelectionSet,
     )
@@ -173,6 +166,7 @@
         super(TestUploadProcessorBase, self).tearDown()
 
     def getUploadProcessor(self, txn):
+
         def getPolicy(distro, build):
             self.options.distro = distro.name
             policy = findPolicyByName(self.options.context)
@@ -182,6 +176,7 @@
                 policy.archive = build.archive
             policy.setOptions(self.options)
             return policy
+
         return UploadProcessor(
             self.options.base_fsroot, self.options.dryrun,
             self.options.nomails, self.options.builds,
@@ -273,7 +268,7 @@
             queue_entry=None):
         """Queue one of our test uploads.
 
-        upload_name is the name of the test upload directory. If there 
+        upload_name is the name of the test upload directory. If there
         is no explicit queue entry name specified, it is also
         the name of the queue entry directory we create.
         relative_path is the path to create inside the upload, eg
@@ -1210,7 +1205,8 @@
         error_report.write(fp)
         error_text = fp.getvalue()
         self.assertTrue(
-            "Unable to find mandatory field 'Files' in the changes file" in error_text)
+            "Unable to find mandatory field 'Files' "
+            "in the changes file" in error_text)
 
         # Housekeeping so the next test won't fail.
         shutil.rmtree(upload_dir)
@@ -1280,6 +1276,7 @@
             "Invalid upload path (1/ubuntu) for this policy (insecure)"]
         self.assertEmail(contents=contents, recipients=[])
 
+
     # Uploads that are new should have the component overridden
     # such that:
     #   'contrib' -> 'multiverse'
@@ -1977,7 +1974,8 @@
         archive = self.factory.makeArchive()
         archive.require_virtualized = False
         build = self.factory.makeSourcePackageRecipeBuild(sourcename=u"bar",
-            distroseries=self.breezy, archive=archive, requester=archive.owner)
+            distroseries=self.breezy, archive=archive,
+            requester=archive.owner)
         self.assertEquals(archive.owner, build.requester)
         bq = self.factory.makeSourcePackageRecipeBuildJob(recipe_build=build)
         # Commit so the build cookie has the right ids.
@@ -2045,4 +2043,5 @@
 
     def test_invalid_jobid(self):
         self.assertRaises(
-            ValueError, parse_build_upload_leaf_name, "aaba-a42-PACKAGEBUILD-abc")
+            ValueError, parse_build_upload_leaf_name,
+            "aaba-a42-PACKAGEBUILD-abc")

=== modified file 'lib/lp/blueprints/stories/blueprints/xx-dependencies.txt'
--- lib/lp/blueprints/stories/blueprints/xx-dependencies.txt	2010-08-26 02:30:06 +0000
+++ lib/lp/blueprints/stories/blueprints/xx-dependencies.txt	2010-10-27 02:13:03 +0000
@@ -222,13 +222,13 @@
   <img src="deptree.png" usemap="#deptree" />
   <map id="deptree" name="deptree">
   <area shape="poly"
-    title="Support &lt;canvas&gt; Objects" .../>
-  <area shape="poly"
-    href="http://blueprints.launchpad.dev/firefox/+spec/e4x"; .../>
-  <area shape="poly"
-    href="http://blueprints.launchpad.dev/firefox/+spec/mergewin"; .../>
-  <area shape="poly"
-    href="http://blueprints.launchpad.dev/firefox/+spec/svg...support"; .../>
+    ...title="Support &lt;canvas&gt; Objects" .../>
+  <area shape="poly"
+    ...href="http://blueprints.launchpad.dev/firefox/+spec/e4x"; .../>
+  <area shape="poly"
+    ...href="http://blueprints.launchpad.dev/firefox/+spec/mergewin"; .../>
+  <area shape="poly"
+    ...href="http://blueprints.launchpad.dev/firefox/+spec/svg...support"; .../>
   </map>
 
 

=== modified file 'lib/lp/blueprints/stories/blueprints/xx-non-ascii-imagemap.txt'
--- lib/lp/blueprints/stories/blueprints/xx-non-ascii-imagemap.txt	2010-08-26 02:30:06 +0000
+++ lib/lp/blueprints/stories/blueprints/xx-non-ascii-imagemap.txt	2010-10-27 02:13:03 +0000
@@ -15,7 +15,7 @@
   >>> print anon_browser.contents
   <img ...
   <map id="deptree" name="deptree">
-  <area shape="poly" title="Support &lt;canvas&gt; Objects" .../>
+  <area shape="poly" ...title="Support &lt;canvas&gt; Objects" .../>
   <area shape="poly" ...title="A title with non&#45;ascii characters áã" .../>
   ...
 

=== modified file 'lib/lp/blueprints/vocabularies/specificationdependency.py'
--- lib/lp/blueprints/vocabularies/specificationdependency.py	2010-09-26 22:29:58 +0000
+++ lib/lp/blueprints/vocabularies/specificationdependency.py	2010-10-27 02:13:03 +0000
@@ -97,7 +97,7 @@
         This implementation is a little fuzzy and will return specs for URLs
         that, for example, don't have the host name right.  This seems
         unlikely to cause confusion in practice, and being too anal probably
-        would be confusing (e.g. not accepting edge URLs on lpnet).
+        would be confusing (e.g. not accepting production URLs on staging).
         """
         scheme, netloc, path, params, args, fragment = urlparse(url)
         if not scheme or not netloc:

=== modified file 'lib/lp/blueprints/vocabularies/tests/specificationdepcandidates.txt'
--- lib/lp/blueprints/vocabularies/tests/specificationdepcandidates.txt	2010-08-27 04:24:55 +0000
+++ lib/lp/blueprints/vocabularies/tests/specificationdepcandidates.txt	2010-10-27 02:13:03 +0000
@@ -90,7 +90,7 @@
 appropriately. Queries containing regular expression operators, for
 example, will simply look for the respective characters within the
 vocabulary's item (this used to be the cause of an OOPS, see
-https://bugs.edge.launchpad.net/blueprint/+bug/139385 for more
+https://bugs.launchpad.net/blueprint/+bug/139385 for more
 details).
 
     >>> list(naked_vocab.search('*'))

=== modified file 'lib/lp/bugs/browser/tests/bugs-views.txt'
--- lib/lp/bugs/browser/tests/bugs-views.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/bugs/browser/tests/bugs-views.txt	2010-10-27 02:13:03 +0000
@@ -37,7 +37,7 @@
     >>> len(bug_eight.bugtasks)
     1
     >>> bug_eight.bugtasks[0].transitionToStatus(
-    ...     BugTaskStatus.CONFIRMED, getUtility(ILaunchBag).user)
+    ...     BugTaskStatus.CONFIRMED, bug_eight.bugtasks[0].distribution.owner)
     >>> def fix_bug(bug_id, bugtask_index=0):
     ...     bugtask = getUtility(IBugSet).get(bug_id).bugtasks[bugtask_index]
     ...     bugtask.transitionToStatus(

=== modified file 'lib/lp/bugs/browser/tests/test_bugattachment_file_access.py'
--- lib/lp/bugs/browser/tests/test_bugattachment_file_access.py	2010-10-04 19:50:45 +0000
+++ lib/lp/bugs/browser/tests/test_bugattachment_file_access.py	2010-10-27 02:13:03 +0000
@@ -14,25 +14,23 @@
 from zope.security.interfaces import Unauthorized
 
 from canonical.launchpad.browser.librarian import (
+    SafeStreamOrRedirectLibraryFileAliasView,
     StreamOrRedirectLibraryFileAliasView,
-    SafeStreamOrRedirectLibraryFileAliasView,
     )
-from canonical.launchpad.interfaces import ILaunchBag
 from canonical.launchpad.interfaces.librarian import (
     ILibraryFileAliasWithParent,
     )
+from canonical.launchpad.webapp.interfaces import ILaunchBag
 from canonical.launchpad.webapp.publisher import RedirectionView
 from canonical.launchpad.webapp.servers import LaunchpadTestRequest
 from canonical.testing.layers import LaunchpadFunctionalLayer
-from lp.bugs.browser.bugattachment import (
-    BugAttachmentFileNavigation,
-    )
+from lp.bugs.browser.bugattachment import BugAttachmentFileNavigation
+import lp.services.features
+from lp.services.features.flags import NullFeatureController
 from lp.testing import (
     login_person,
     TestCaseWithFactory,
     )
-import lp.services.features
-from lp.services.features.flags import NullFeatureController
 
 
 class TestAccessToBugAttachmentFiles(TestCaseWithFactory):

=== modified file 'lib/lp/bugs/browser/tests/test_bugtask.py'
--- lib/lp/bugs/browser/tests/test_bugtask.py	2010-10-07 04:28:17 +0000
+++ lib/lp/bugs/browser/tests/test_bugtask.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2009 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 

=== modified file 'lib/lp/bugs/browser/tests/test_bugtracker_views.py'
--- lib/lp/bugs/browser/tests/test_bugtracker_views.py	2010-09-19 23:52:49 +0000
+++ lib/lp/bugs/browser/tests/test_bugtracker_views.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for BugTracker views."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from zope.component import getUtility

=== modified file 'lib/lp/bugs/configure.zcml'
--- lib/lp/bugs/configure.zcml	2010-10-19 21:30:53 +0000
+++ lib/lp/bugs/configure.zcml	2010-10-27 02:13:03 +0000
@@ -706,7 +706,6 @@
                     getSubscribersForPerson
                     indexed_messages
                     getAlsoNotifiedSubscribers
-                    getStructuralSubscribers
                     getBugWatch
                     canBeNominatedFor
                     getNominationFor

=== modified file 'lib/lp/bugs/doc/bugtask-search.txt'
--- lib/lp/bugs/doc/bugtask-search.txt	2010-10-19 18:44:31 +0000
+++ lib/lp/bugs/doc/bugtask-search.txt	2010-10-27 02:13:03 +0000
@@ -299,7 +299,7 @@
     ...     BugTaskImportance,
     ...     BugTaskStatus,
     ...     )
-    >>> from lp.bugs.tests.test_bugtask_1 import (
+    >>> from lp.bugs.model.tests.test_bugtask import (
     ...     BugTaskSearchBugsElsewhereTest)
     >>> def bugTaskInfo(bugtask):
     ...     return '%i %i %s %s' % (

=== modified file 'lib/lp/bugs/doc/bugtask.txt'
--- lib/lp/bugs/doc/bugtask.txt	2010-10-19 18:44:31 +0000
+++ lib/lp/bugs/doc/bugtask.txt	2010-10-27 02:13:03 +0000
@@ -1022,7 +1022,7 @@
 And attribute setting:
 
     >>> bug_upstream_firefox_no_svg_support.transitionToStatus(
-    ...     BugTaskStatus.FIXRELEASED, getUtility(ILaunchBag).user)
+    ...     BugTaskStatus.CONFIRMED, getUtility(ILaunchBag).user)
     >>> bug_upstream_firefox_no_svg_support.transitionToStatus(
     ...     BugTaskStatus.NEW, getUtility(ILaunchBag).user)
 

=== modified file 'lib/lp/bugs/doc/displaying-bugs-and-tasks.txt'
--- lib/lp/bugs/doc/displaying-bugs-and-tasks.txt	2010-10-19 18:44:31 +0000
+++ lib/lp/bugs/doc/displaying-bugs-and-tasks.txt	2010-10-27 02:13:03 +0000
@@ -164,7 +164,7 @@
 Lastly, some cleanup:
 
   >>> test_task.transitionToStatus(
-  ...   ORIGINAL_STATUS, getUtility(ILaunchBag).user)
+  ...   ORIGINAL_STATUS, test_task.distribution.owner)
   >>> test_task.transitionToAssignee(ORIGINAL_ASSIGNEE)
 
 

=== modified file 'lib/lp/bugs/doc/externalbugtracker-mantis-csv.txt'
--- lib/lp/bugs/doc/externalbugtracker-mantis-csv.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/bugs/doc/externalbugtracker-mantis-csv.txt	2010-10-27 02:13:03 +0000
@@ -122,8 +122,8 @@
 Instead of issuing one request per bug watch, like was done before,
 updateBugWatches() issues only one request to update all watches:
 
-    >>> from lp.services.propertycache import IPropertyCache
-    >>> del IPropertyCache(example_ext_bug_tracker).csv_data
+    >>> from lp.services.propertycache import get_property_cache
+    >>> del get_property_cache(example_ext_bug_tracker).csv_data
 
     >>> example_ext_bug_tracker.trace_calls = True
     >>> bug_watch_updater.updateBugWatches(

=== modified file 'lib/lp/bugs/doc/malone-karma.txt'
--- lib/lp/bugs/doc/malone-karma.txt	2010-10-19 18:44:31 +0000
+++ lib/lp/bugs/doc/malone-karma.txt	2010-10-27 02:13:03 +0000
@@ -105,7 +105,7 @@
 
     >>> old_bugtask = Snapshot(bugtask, providing=IDistroBugTask)
     >>> bugtask.transitionToStatus(
-    ...     BugTaskStatus.INVALID, getUtility(ILaunchBag).user)
+    ...     BugTaskStatus.INVALID, bugtask.target.owner)
     >>> notify(ObjectModifiedEvent(bugtask, old_bugtask, ['status']))
     Karma added: action=bugrejected, distribution=debian
 

=== modified file 'lib/lp/bugs/interfaces/bug.py'
--- lib/lp/bugs/interfaces/bug.py	2010-10-15 16:09:18 +0000
+++ lib/lp/bugs/interfaces/bug.py	2010-10-27 02:13:03 +0000
@@ -81,7 +81,6 @@
 from lp.bugs.interfaces.bugwatch import IBugWatch
 from lp.bugs.interfaces.cve import ICve
 from lp.code.interfaces.branchlink import IHasLinkedBranches
-from lp.registry.enum import BugNotificationLevel
 from lp.registry.interfaces.mentoringoffer import ICanBeMentored
 from lp.registry.interfaces.person import IPerson
 from lp.services.fields import (
@@ -493,12 +492,6 @@
         from duplicates.
         """
 
-    def getStructuralSubscribers(recipients=None, level=None):
-        """Return `IPerson`s subscribed to this bug's targets.
-
-        This takes into account bug subscription filters.
-        """
-
     def getSubscriptionsFromDuplicates():
         """Return IBugSubscriptions subscribed from dupes of this bug."""
 

=== modified file 'lib/lp/bugs/interfaces/bugtarget.py'
--- lib/lp/bugs/interfaces/bugtarget.py	2010-09-22 08:48:24 +0000
+++ lib/lp/bugs/interfaces/bugtarget.py	2010-10-27 02:13:03 +0000
@@ -165,6 +165,11 @@
             u"Search for bugs that have been modified since the given "
             "date."),
         required=False),
+    "created_since": Datetime(
+        title=_(
+            u"Search for bugs that have been created since the given "
+            "date."),
+        required=False),
     }
 search_tasks_params_for_api_devel = search_tasks_params_for_api_1_0.copy()
 search_tasks_params_for_api_devel["omit_targeted"] = copy_field(
@@ -226,7 +231,8 @@
                     hardware_owner_is_affected_by_bug=False,
                     hardware_owner_is_subscribed_to_bug=False,
                     hardware_is_linked_to_bug=False, linked_branches=None,
-                    structural_subscriber=None, modified_since=None):
+                    structural_subscriber=None, modified_since=None,
+                    created_since=None):
         """Search the IBugTasks reported on this entity.
 
         :search_params: a BugTaskSearchParams object

=== modified file 'lib/lp/bugs/interfaces/bugtask.py'
--- lib/lp/bugs/interfaces/bugtask.py	2010-09-21 09:37:06 +0000
+++ lib/lp/bugs/interfaces/bugtask.py	2010-10-27 02:13:03 +0000
@@ -1128,22 +1128,22 @@
     def __init__(self, user, bug=None, searchtext=None, fast_searchtext=None,
                  status=None, importance=None, milestone=None,
                  assignee=None, sourcepackagename=None, owner=None,
-                 statusexplanation=None, attachmenttype=None,
-                 orderby=None, omit_dupes=False, subscriber=None,
-                 component=None, pending_bugwatch_elsewhere=False,
-                 resolved_upstream=False, open_upstream=False,
-                 has_no_upstream_bugtask=False, tag=None, has_cve=False,
-                 bug_supervisor=None, bug_reporter=None, nominated_for=None,
-                 bug_commenter=None, omit_targeted=False, date_closed=None,
-                 affected_user=None, affects_me=False, hardware_bus=None,
-                 hardware_vendor_id=None, hardware_product_id=None,
-                 hardware_driver_name=None, hardware_driver_package_name=None,
+                 attachmenttype=None, orderby=None, omit_dupes=False,
+                 subscriber=None, component=None,
+                 pending_bugwatch_elsewhere=False, resolved_upstream=False,
+                 open_upstream=False, has_no_upstream_bugtask=False, tag=None,
+                 has_cve=False, bug_supervisor=None, bug_reporter=None,
+                 nominated_for=None, bug_commenter=None, omit_targeted=False,
+                 date_closed=None, affected_user=None, affects_me=False,
+                 hardware_bus=None, hardware_vendor_id=None,
+                 hardware_product_id=None, hardware_driver_name=None,
+                 hardware_driver_package_name=None,
                  hardware_owner_is_bug_reporter=None,
                  hardware_owner_is_affected_by_bug=False,
                  hardware_owner_is_subscribed_to_bug=False,
                  hardware_is_linked_to_bug=False,
                  linked_branches=None, structural_subscriber=None,
-                 modified_since=None):
+                 modified_since=None, created_since=None):
 
         self.bug = bug
         self.searchtext = searchtext
@@ -1154,7 +1154,6 @@
         self.assignee = assignee
         self.sourcepackagename = sourcepackagename
         self.owner = owner
-        self.statusexplanation = statusexplanation
         self.attachmenttype = attachmenttype
         self.user = user
         self.orderby = orderby
@@ -1189,6 +1188,7 @@
         self.linked_branches = linked_branches
         self.structural_subscriber = structural_subscriber
         self.modified_since = None
+        self.created_since = created_since
 
     def setProduct(self, product):
         """Set the upstream context on which to filter the search."""
@@ -1261,7 +1261,8 @@
                        hardware_owner_is_affected_by_bug=False,
                        hardware_owner_is_subscribed_to_bug=False,
                        hardware_is_linked_to_bug=False, linked_branches=None,
-                       structural_subscriber=None, modified_since=None):
+                       structural_subscriber=None, modified_since=None,
+                       created_since=None):
         """Create and return a new instance using the parameter list."""
         search_params = cls(user=user, orderby=order_by)
 
@@ -1281,7 +1282,6 @@
             from lp.bugs.interfaces.bugattachment import (
                 BugAttachmentType)
             search_params.attachmenttype = BugAttachmentType.PATCH
-            search_params.has_patch = has_patch
         search_params.has_cve = has_cve
         if zope_isinstance(tags, (list, tuple)):
             if len(tags) > 0:
@@ -1331,6 +1331,7 @@
         search_params.linked_branches=linked_branches
         search_params.structural_subscriber = structural_subscriber
         search_params.modified_since = modified_since
+        search_params.created_since = created_since
 
         return search_params
 
@@ -1523,6 +1524,12 @@
     def getOpenBugTasksPerProduct(user, products):
         """Return open bugtask count for multiple products."""
 
+    def getStructuralSubscribers(bugtasks, recipients=None, level=None):
+        """Return `IPerson`s subscribed to the given bug tasks.
+
+        This takes into account bug subscription filters.
+        """
+
 
 def valid_remote_bug_url(value):
     """Verify that the URL is to a bug to a known bug tracker."""

=== modified file 'lib/lp/bugs/javascript/tests/test_me_too.js'
--- lib/lp/bugs/javascript/tests/test_me_too.js	2010-07-11 00:32:53 +0000
+++ lib/lp/bugs/javascript/tests/test_me_too.js	2010-10-27 02:13:03 +0000
@@ -53,19 +53,19 @@
         var inpage = Y.Node.create([
             '<span id="affectsmetoo">',
             '  <span class="static">',
-            '    <img src="https://bugs.edge.launchpad.net/@@/flame-icon"; alt="" />',
+            '    <img src="https://bugs.launchpad.net/@@/flame-icon"; alt="" />',
             '    This bug affects me too',
             '    <a href="+affectsmetoo">',
             '      <img class="editicon" alt="Edit"',
-            '           src="https://bugs.edge.launchpad.net/@@/edit"; />',
+            '           src="https://bugs.launchpad.net/@@/edit"; />',
             '    </a>',
             '  </span>',
             '  <span class="dynamic unseen">',
             '    <img class="editicon" alt="Edit"',
-            '         src="https://bugs.edge.launchpad.net/@@/edit"; />',
+            '         src="https://bugs.launchpad.net/@@/edit"; />',
             '    <a href="+affectsmetoo" class="js-action"',
             '       ><span class="value">Does this bug affect you?</span></a>',
-            '    <img src="https://bugs.edge.launchpad.net/@@/flame-icon"; alt=""/>',
+            '    <img src="https://bugs.launchpad.net/@@/flame-icon"; alt=""/>',
             '  </span>',
             '</span>'].join(''));
         Y.one("body").appendChild(inpage);

=== modified file 'lib/lp/bugs/mail/tests/test_bug_duplicate_notifications.py'
--- lib/lp/bugs/mail/tests/test_bug_duplicate_notifications.py	2010-10-04 19:50:45 +0000
+++ lib/lp/bugs/mail/tests/test_bug_duplicate_notifications.py	2010-10-27 02:13:03 +0000
@@ -12,11 +12,9 @@
 from zope.event import notify
 from zope.interface import providedBy
 
-from lp.bugs.model.bugnotification import BugNotification
-from canonical.launchpad.interfaces import BugTaskStatus
 from canonical.launchpad.webapp.interfaces import ILaunchBag
 from canonical.testing.layers import DatabaseFunctionalLayer
-from lp.bugs.scripts.bugnotification import construct_email_notifications
+from lp.bugs.interfaces.bugtask import BugTaskStatus
 from lp.services.mail import stub
 from lp.testing import TestCaseWithFactory
 
@@ -43,14 +41,16 @@
         self.person_subscribed = self.factory.makePerson(
             name='subscribed', displayname='Person',
             email=self.person_subscribed_email)
-        self.dup_bug.subscribe(self.person_subscribed, subscribed_by=self.user)
+        self.dup_bug.subscribe(
+            self.person_subscribed, subscribed_by=self.user)
         self.dup_bug.markAsDuplicate(self.master_bug)
 
     def test_dup_subscriber_change_notification_message(self):
         """Duplicate bug number in the reason (email footer) for
            duplicate subscribers when a master bug is modified."""
         self.assertEqual(len(stub.test_emails), 0, 'emails in queue')
-        self.master_bug_task.transitionToStatus(BugTaskStatus.CONFIRMED, self.user)
+        self.master_bug_task.transitionToStatus(
+            BugTaskStatus.CONFIRMED, self.user)
         notify(ObjectModifiedEvent(
             self.master_bug_task, self.master_bug_task_before_modification,
             ['status'], user=self.user))

=== modified file 'lib/lp/bugs/model/bug.py'
--- lib/lp/bugs/model/bug.py	2010-10-19 21:30:53 +0000
+++ lib/lp/bugs/model/bug.py	2010-10-27 02:13:03 +0000
@@ -176,9 +176,6 @@
 from lp.registry.interfaces.productseries import IProductSeries
 from lp.registry.interfaces.series import SeriesStatus
 from lp.registry.interfaces.sourcepackage import ISourcePackage
-from lp.registry.interfaces.structuralsubscription import (
-    IStructuralSubscriptionTarget,
-    )
 from lp.registry.model.mentoringoffer import MentoringOffer
 from lp.registry.model.person import (
     Person,
@@ -189,8 +186,8 @@
 from lp.services.fields import DuplicateBug
 from lp.services.propertycache import (
     cachedproperty,
-    IPropertyCache,
-    IPropertyCacheManager,
+    clear_property_cache,
+    get_property_cache,
     )
 
 
@@ -483,7 +480,7 @@
             for message in messages:
                 if message.id not in chunk_map:
                     continue
-                cache = IPropertyCache(message)
+                cache = get_property_cache(message)
                 cache.text_contents = Message.chunks_text(
                     chunk_map[message.id])
         def eager_load(rows, slice_info):
@@ -741,7 +738,7 @@
     def unsubscribe(self, person, unsubscribed_by):
         """See `IBug`."""
         # Drop cached subscription info.
-        IPropertyCacheManager(self).clear()
+        clear_property_cache(self)
         if person is None:
             person = unsubscribed_by
 
@@ -763,7 +760,7 @@
                 # disabled see the change.
                 store.flush()
                 self.updateHeat()
-                del IPropertyCache(self)._known_viewers
+                del get_property_cache(self)._known_viewers
                 return
 
     def unsubscribeFromDupes(self, person, unsubscribed_by):
@@ -934,8 +931,9 @@
             # XXX: RobertCollins 2010-09-22 bug=374777: This SQL(...) is a
             # hack; it does not seem to be possible to express DISTINCT ON
             # with Storm.
-            (SQL("DISTINCT ON (Person.name, BugSubscription.person) 0 AS ignore"),
-             # return people and subscribptions
+            (SQL("DISTINCT ON (Person.name, BugSubscription.person) "
+                 "0 AS ignore"),
+             # Return people and subscriptions
              Person, BugSubscription),
             # For this bug or its duplicates
             Or(
@@ -948,12 +946,12 @@
             # (person X is in the team X)
             TeamParticipation.person == person.id,
             # XXX: Storm fails to compile this, so manually done.
-            # bug=https://bugs.edge.launchpad.net/storm/+bug/627137
+            # bug=https://bugs.launchpad.net/storm/+bug/627137
             # RBC 20100831
             SQL("""TeamParticipation.team = BugSubscription.person"""),
             # Join in the Person rows we want
             # XXX: Storm fails to compile this, so manually done.
-            # bug=https://bugs.edge.launchpad.net/storm/+bug/627137
+            # bug=https://bugs.launchpad.net/storm/+bug/627137
             # RBC 20100831
             SQL("""Person.id = TeamParticipation.team"""),
             ).order_by(Person.name),
@@ -986,8 +984,8 @@
 
         # Structural subscribers.
         also_notified_subscribers.update(
-            self.getStructuralSubscribers(
-                recipients=recipients, level=level))
+            getUtility(IBugTaskSet).getStructuralSubscribers(
+                self.bugtasks, recipients=recipients, level=level))
 
         # Direct subscriptions always take precedence over indirect
         # subscriptions.
@@ -999,58 +997,6 @@
             (also_notified_subscribers - direct_subscribers),
             key=lambda x: removeSecurityProxy(x).displayname)
 
-    def getStructuralSubscribers(self, recipients=None, level=None):
-        """See `IBug`. """
-        query_arguments = []
-        for bugtask in self.bugtasks:
-            if IStructuralSubscriptionTarget.providedBy(bugtask.target):
-                query_arguments.append((bugtask.target, bugtask))
-                if bugtask.target.parent_subscription_target is not None:
-                    query_arguments.append(
-                        (bugtask.target.parent_subscription_target, bugtask))
-            if ISourcePackage.providedBy(bugtask.target):
-                # Distribution series bug tasks with a package have the source
-                # package set as their target, so we add the distroseries
-                # explicitly to the set of subscription targets.
-                query_arguments.append((bugtask.distroseries, bugtask))
-            if bugtask.milestone is not None:
-                query_arguments.append((bugtask.milestone, bugtask))
-
-        if len(query_arguments) == 0:
-            return EmptyResultSet()
-
-        if level is None:
-            # If level is not specified, default to NOTHING so that all
-            # subscriptions are found. XXX: Perhaps this should go in
-            # getSubscriptionsForBugTask()?
-            level = BugNotificationLevel.NOTHING
-
-        # Build the query.
-        union = lambda left, right: left.union(right)
-        queries = (
-            target.getSubscriptionsForBugTask(bugtask, level)
-            for target, bugtask in query_arguments)
-        subscriptions = reduce(union, queries)
-
-        # Pull all the subscriptions in.
-        subscriptions = list(subscriptions)
-
-        # Prepare a query for the subscribers.
-        subscribers = Store.of(self).find(
-            Person, Person.id.is_in(
-                subscription.subscriberID
-                for subscription in subscriptions))
-
-        if recipients is not None:
-            # We need to process subscriptions, so pull all the subscribes
-            # into the cache, then update recipients with the subscriptions.
-            subscribers = list(subscribers)
-            for subscription in subscriptions:
-                recipients.addStructuralSubscriber(
-                    subscription.subscriber, subscription.target)
-
-        return subscribers
-
     def getBugNotificationRecipients(self, duplicateof=None, old_bug=None,
                                      level=None,
                                      include_master_dupe_subscribers=False):
@@ -1426,7 +1372,7 @@
         question_target = IQuestionTarget(bugtask.target)
         question = question_target.createQuestionFromBug(self)
         self.addChange(BugConvertedToQuestion(UTC_NOW, person, question))
-        IPropertyCache(self)._question_from_bug = question
+        get_property_cache(self)._question_from_bug = question
 
         notify(BugBecameQuestionEvent(self, question, person))
         return question
@@ -1541,7 +1487,6 @@
             assert IProductSeries.providedBy(target)
             productseries = target
 
-        admins = getUtility(ILaunchpadCelebrities).admin
         if not (check_permission("launchpad.BugSupervisor", target) or
                 check_permission("launchpad.Driver", target)):
             raise NominationError(
@@ -1713,7 +1658,7 @@
                 self.date_made_private = None
 
             # XXX: This should be a bulk update. RBC 20100827
-            # bug=https://bugs.edge.launchpad.net/storm/+bug/625071
+            # bug=https://bugs.launchpad.net/storm/+bug/625071
             for attachment in self.attachments_unpopulated:
                 attachment.libraryfile.restricted = private
 
@@ -1761,7 +1706,7 @@
         # and insert the new ones.
         new_tags = set([tag.lower() for tag in tags])
         old_tags = set(self.tags)
-        del IPropertyCache(self)._cached_tags
+        del get_property_cache(self)._cached_tags
         added_tags = new_tags.difference(old_tags)
         removed_tags = old_tags.difference(new_tags)
         for removed_tag in removed_tags:
@@ -2012,7 +1957,7 @@
             # will be found without a query when dereferenced.
             indexed_message = message_to_indexed.get(attachment._messageID)
             if indexed_message is not None:
-                IPropertyCache(attachment).message = indexed_message
+                get_property_cache(attachment).message = indexed_message
             return attachment
         rawresults = self._attachments_query()
         return DecoratedResultSet(rawresults, set_indexed_message)

=== modified file 'lib/lp/bugs/model/bugtarget.py'
--- lib/lp/bugs/model/bugtarget.py	2010-10-14 15:22:46 +0000
+++ lib/lp/bugs/model/bugtarget.py	2010-10-27 02:13:03 +0000
@@ -93,7 +93,7 @@
                     hardware_owner_is_affected_by_bug=False,
                     hardware_owner_is_subscribed_to_bug=False,
                     hardware_is_linked_to_bug=False, linked_branches=None,
-                    modified_since=None):
+                    modified_since=None, created_since=None):
         """See `IHasBugs`."""
         if status is None:
             # If no statuses are supplied, default to the

=== modified file 'lib/lp/bugs/model/bugtask.py'
--- lib/lp/bugs/model/bugtask.py	2010-09-23 10:27:11 +0000
+++ lib/lp/bugs/model/bugtask.py	2010-10-27 02:13:03 +0000
@@ -127,6 +127,7 @@
     )
 from lp.bugs.model.bugnomination import BugNomination
 from lp.bugs.model.bugsubscription import BugSubscription
+from lp.registry.enum import BugNotificationLevel
 from lp.registry.interfaces.distribution import (
     IDistribution,
     IDistributionSet,
@@ -155,9 +156,12 @@
 from lp.registry.interfaces.projectgroup import IProjectGroup
 from lp.registry.interfaces.sourcepackage import ISourcePackage
 from lp.registry.interfaces.sourcepackagename import ISourcePackageNameSet
+from lp.registry.interfaces.structuralsubscription import (
+    IStructuralSubscriptionTarget,
+    )
 from lp.registry.model.pillar import pillar_sort_key
 from lp.registry.model.sourcepackagename import SourcePackageName
-from lp.services.propertycache import IPropertyCache
+from lp.services.propertycache import get_property_cache
 from lp.soyuz.enums import PackagePublishingStatus
 from lp.soyuz.model.publishing import SourcePackagePublishingHistory
 from lp.soyuz.model.sourcepackagerelease import SourcePackageRelease
@@ -891,12 +895,13 @@
             user.id == celebrities.janitor.id):
             return True
         else:
-            return (self.status is not BugTaskStatus.WONTFIX and
-                    new_status not in BUG_SUPERVISOR_BUGTASK_STATUSES)
+            return (self.status not in (
+                        BugTaskStatus.WONTFIX, BugTaskStatus.FIXRELEASED)
+                    and new_status not in BUG_SUPERVISOR_BUGTASK_STATUSES)
 
     def transitionToStatus(self, new_status, user, when=None):
         """See `IBugTask`."""
-        if not new_status:
+        if not new_status or user is None:
             # This is mainly to facilitate tests which, unlike the
             # normal status form, don't always submit a status when
             # testing the edit form.
@@ -1346,7 +1351,7 @@
     """
     userid = user.id
     def cache_user_can_view_bug(bugtask):
-        IPropertyCache(bugtask.bug)._known_viewers = set([userid])
+        get_property_cache(bugtask.bug)._known_viewers = set([userid])
         return bugtask
     return cache_user_can_view_bug
 
@@ -1487,7 +1492,7 @@
         """See `IBugTaskSet`."""
         # XXX: JSK: 2007-12-19: This method should probably return
         # None when task_id is not present. See:
-        # https://bugs.edge.launchpad.net/launchpad/+bug/123592
+        # https://bugs.launchpad.net/launchpad/+bug/123592
         try:
             bugtask = BugTask.get(task_id)
         except SQLObjectNotFound:
@@ -1942,12 +1947,17 @@
                 "Bug.date_last_updated > %s" % (
                     sqlvalues(params.modified_since,)))
 
+        if params.created_since:
+            extra_clauses.append(
+                "BugTask.datecreated > %s" % (
+                    sqlvalues(params.created_since,)))
+
         orderby_arg = self._processOrderBy(params)
 
         query = " AND ".join(extra_clauses)
 
         if not decorators:
-            decorator = lambda x:x
+            decorator = lambda x: x
         else:
             def decorator(obj):
                 for decor in decorators:
@@ -2371,7 +2381,7 @@
             bugtask._syncFromConjoinedSlave()
 
         bugtask.updateTargetNameCache()
-        del IPropertyCache(bug).bugtasks
+        del get_property_cache(bug).bugtasks
         # Because of block_implicit_flushes, it is possible for a new bugtask
         # to be queued in appropriately, which leads to Bug.bugtasks not
         # finding the bugtask.
@@ -2829,3 +2839,57 @@
             counts.append(package_counts)
 
         return counts
+
+    def getStructuralSubscribers(self, bugtasks, recipients=None, level=None):
+        """See `IBugTaskSet`."""
+        query_arguments = []
+        for bugtask in bugtasks:
+            if IStructuralSubscriptionTarget.providedBy(bugtask.target):
+                query_arguments.append((bugtask.target, bugtask))
+                if bugtask.target.parent_subscription_target is not None:
+                    query_arguments.append(
+                        (bugtask.target.parent_subscription_target, bugtask))
+            if ISourcePackage.providedBy(bugtask.target):
+                # Distribution series bug tasks with a package have the source
+                # package set as their target, so we add the distroseries
+                # explicitly to the set of subscription targets.
+                query_arguments.append((bugtask.distroseries, bugtask))
+            if bugtask.milestone is not None:
+                query_arguments.append((bugtask.milestone, bugtask))
+
+        if len(query_arguments) == 0:
+            return EmptyResultSet()
+
+        if level is None:
+            # If level is not specified, default to NOTHING so that all
+            # subscriptions are found.
+            level = BugNotificationLevel.NOTHING
+
+        # Build the query.
+        union = lambda left, right: (
+            removeSecurityProxy(left).union(
+                removeSecurityProxy(right)))
+        queries = (
+            target.getSubscriptionsForBugTask(bugtask, level)
+            for target, bugtask in query_arguments)
+        subscriptions = reduce(union, queries)
+
+        # Pull all the subscriptions in.
+        subscriptions = list(subscriptions)
+
+        # Prepare a query for the subscribers.
+        from lp.registry.model.person import Person
+        subscribers = IStore(Person).find(
+            Person, Person.id.is_in(
+                removeSecurityProxy(subscription).subscriberID
+                for subscription in subscriptions))
+
+        if recipients is not None:
+            # We need to process subscriptions, so pull all the subscribes into
+            # the cache, then update recipients with the subscriptions.
+            subscribers = list(subscribers)
+            for subscription in subscriptions:
+                recipients.addStructuralSubscriber(
+                    subscription.subscriber, subscription.target)
+
+        return subscribers

=== modified file 'lib/lp/bugs/model/tests/test_bug.py'
--- lib/lp/bugs/model/tests/test_bug.py	2010-10-15 16:11:17 +0000
+++ lib/lp/bugs/model/tests/test_bug.py	2010-10-27 02:13:03 +0000
@@ -1,14 +1,9 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
-from storm.store import ResultSet
-
 from canonical.testing.layers import DatabaseFunctionalLayer
-from lp.bugs.mail.bugnotificationrecipients import BugNotificationRecipients
 from lp.registry.enum import BugNotificationLevel
 from lp.registry.interfaces.person import PersonVisibility
 from lp.registry.model.structuralsubscription import StructuralSubscription
@@ -17,7 +12,6 @@
     person_logged_in,
     TestCaseWithFactory,
     )
-from lp.testing.matchers import StartsWith
 
 
 class TestBug(TestCaseWithFactory):
@@ -246,84 +240,3 @@
         self.assertTrue(
             subscriber not in duplicate_subscribers,
             "Subscriber should not be in duplicate_subscribers.")
-
-
-class TestBugStructuralSubscribers(TestCaseWithFactory):
-
-    layer = DatabaseFunctionalLayer
-
-    def test_getStructuralSubscribers_no_subscribers(self):
-        # If there are no subscribers for any of the bug's targets then no
-        # subscribers will be returned by getStructuralSubscribers().
-        product = self.factory.makeProduct()
-        bug = self.factory.makeBug(product=product)
-        subscribers = bug.getStructuralSubscribers()
-        self.assertIsInstance(subscribers, ResultSet)
-        self.assertEqual([], list(subscribers))
-
-    def test_getStructuralSubscribers_single_target(self):
-        # Subscribers for any of the bug's targets are returned.
-        subscriber = self.factory.makePerson()
-        login_person(subscriber)
-        product = self.factory.makeProduct()
-        product.addBugSubscription(subscriber, subscriber)
-        bug = self.factory.makeBug(product=product)
-        self.assertEqual([subscriber], list(bug.getStructuralSubscribers()))
-
-    def test_getStructuralSubscribers_multiple_targets(self):
-        # Subscribers for any of the bug's targets are returned.
-        actor = self.factory.makePerson()
-        login_person(actor)
-
-        subscriber1 = self.factory.makePerson()
-        subscriber2 = self.factory.makePerson()
-
-        product1 = self.factory.makeProduct(owner=actor)
-        product1.addBugSubscription(subscriber1, subscriber1)
-        product2 = self.factory.makeProduct(owner=actor)
-        product2.addBugSubscription(subscriber2, subscriber2)
-
-        bug = self.factory.makeBug(product=product1)
-        bug.addTask(actor, product2)
-
-        subscribers = bug.getStructuralSubscribers()
-        self.assertIsInstance(subscribers, ResultSet)
-        self.assertEqual(set([subscriber1, subscriber2]), set(subscribers))
-
-    def test_getStructuralSubscribers_recipients(self):
-        # If provided, getStructuralSubscribers() calls the appropriate
-        # methods on a BugNotificationRecipients object.
-        subscriber = self.factory.makePerson()
-        login_person(subscriber)
-        product = self.factory.makeProduct()
-        product.addBugSubscription(subscriber, subscriber)
-        bug = self.factory.makeBug(product=product)
-        recipients = BugNotificationRecipients()
-        subscribers = bug.getStructuralSubscribers(recipients=recipients)
-        # The return value is a list only when populating recipients.
-        self.assertIsInstance(subscribers, list)
-        self.assertEqual([subscriber], recipients.getRecipients())
-        reason, header = recipients.getReason(subscriber)
-        self.assertThat(
-            reason, StartsWith(
-                u"You received this bug notification because "
-                u"you are subscribed to "))
-        self.assertThat(header, StartsWith(u"Subscriber "))
-
-    def test_getStructuralSubscribers_level(self):
-        # getStructuralSubscribers() respects the given level.
-        subscriber = self.factory.makePerson()
-        login_person(subscriber)
-        product = self.factory.makeProduct()
-        subscription = product.addBugSubscription(subscriber, subscriber)
-        subscription.bug_notification_level = BugNotificationLevel.METADATA
-        bug = self.factory.makeBug(product=product)
-        self.assertEqual(
-            [subscriber], list(
-                bug.getStructuralSubscribers(
-                    level=BugNotificationLevel.METADATA)))
-        subscription.bug_notification_level = BugNotificationLevel.METADATA
-        self.assertEqual(
-            [], list(
-                bug.getStructuralSubscribers(
-                    level=BugNotificationLevel.COMMENTS)))

=== renamed file 'lib/lp/bugs/tests/test_bugtask.py' => 'lib/lp/bugs/model/tests/test_bugtask.py'
--- lib/lp/bugs/tests/test_bugtask.py	2010-10-04 19:50:45 +0000
+++ lib/lp/bugs/model/tests/test_bugtask.py	2010-10-27 02:13:03 +0000
@@ -8,31 +8,52 @@
 import unittest
 
 from lazr.lifecycle.snapshot import Snapshot
+from storm.store import ResultSet
 from zope.component import getUtility
 from zope.interface import providedBy
 
+from canonical.database.sqlbase import flush_database_updates
 from canonical.launchpad.interfaces.launchpad import ILaunchpadCelebrities
 from canonical.launchpad.searchbuilder import (
     all,
     any,
     )
+from canonical.launchpad.webapp.interfaces import ILaunchBag
 from canonical.testing.layers import (
     DatabaseFunctionalLayer,
     LaunchpadZopelessLayer,
     )
+from lp.app.enums import ServiceUsage
+from lp.bugs.interfaces.bug import IBugSet
 from lp.bugs.interfaces.bugtarget import IBugTarget
 from lp.bugs.interfaces.bugtask import (
     BugTaskImportance,
     BugTaskSearchParams,
     BugTaskStatus,
+    IBugTaskSet,
+    IUpstreamBugTask,
+    RESOLVED_BUGTASK_STATUSES,
+    UNRESOLVED_BUGTASK_STATUSES,
     )
+from lp.bugs.interfaces.bugwatch import IBugWatchSet
+from lp.bugs.mail.bugnotificationrecipients import BugNotificationRecipients
 from lp.bugs.model.bugtask import build_tag_search_clause
+from lp.bugs.tests.bug import (
+    create_old_bug,
+    sync_bugtasks,
+    )
 from lp.hardwaredb.interfaces.hwdb import (
     HWBus,
     IHWDeviceSet,
     )
+from lp.registry.enum import BugNotificationLevel
 from lp.registry.interfaces.distribution import IDistributionSet
-from lp.registry.interfaces.person import IPerson, IPersonSet
+from lp.registry.interfaces.person import (
+    IPerson,
+    IPersonSet,
+    )
+from lp.registry.interfaces.product import IProductSet
+from lp.registry.interfaces.projectgroup import IProjectGroupSet
 from lp.testing import (
     ANONYMOUS,
     login,
@@ -42,6 +63,11 @@
     TestCase,
     TestCaseWithFactory,
     )
+from lp.testing.factory import (
+    is_security_proxied_or_harmless,
+    LaunchpadObjectFactory,
+    )
+from lp.testing.matchers import StartsWith
 
 
 class TestBugTaskDelta(TestCaseWithFactory):
@@ -892,7 +918,7 @@
         self.assertEqual(2, tasks.count())
         # Cache in the storm cache the account->person lookup so its not
         # distorting what we're testing.
-        _ = IPerson(person.account, None)
+        IPerson(person.account, None)
         # One query and only one should be issued to get the tasks, bugs and
         # allow access to getConjoinedMaster attribute - an attribute that
         # triggers a permission check (nb: id does not trigger such a check)
@@ -908,6 +934,456 @@
         default_result = target.searchTasks(None)
         self.assertEqual([task1], list(default_result))
 
+    def test_created_since_excludes_earlier_bugtasks(self):
+        # When we search for bug tasks that have been created since a certain
+        # time, tasks for bugs that have not been created since then are
+        # excluded.
+        target = self.makeBugTarget()
+        self.login()
+        task = self.factory.makeBugTask(target=target)
+        date = task.datecreated + timedelta(days=1)
+        result = target.searchTasks(None, created_since=date)
+        self.assertEqual([], list(result))
+
+    def test_created_since_includes_later_bugtasks(self):
+        # When we search for bug tasks that have been created since a certain
+        # time, tasks for bugs that have been created since then are
+        # included.
+        target = self.makeBugTarget()
+        self.login()
+        task = self.factory.makeBugTask(target=target)
+        date = task.datecreated - timedelta(days=1)
+        result = target.searchTasks(None, created_since=date)
+        self.assertEqual([task], list(result))
+
+    def test_created_since_includes_later_bugtasks_excludes_earlier(self):
+        # When we search for bugs that have been created since a certain
+        # time, tasks for bugs that have been created since then are
+        # included, tasks that have not are excluded.
+        target = self.makeBugTarget()
+        self.login()
+        task1 = self.factory.makeBugTask(target=target)
+        date = task1.datecreated
+        task1.datecreated -= timedelta(days=1)
+        task2 = self.factory.makeBugTask(target=target)
+        task2.datecreated += timedelta(days=1)
+        result = target.searchTasks(None, created_since=date)
+        self.assertEqual([task2], list(result))
+
+
+class BugTaskSearchBugsElsewhereTest(unittest.TestCase):
+    """Tests for searching bugs filtering on related bug tasks.
+
+    It also acts as a helper class, which makes related doctests more
+    readable, since they can use methods from this class.
+    """
+    layer = DatabaseFunctionalLayer
+
+    def __init__(self, methodName='runTest', helper_only=False):
+        """If helper_only is True, set up it only as a helper class."""
+        if not helper_only:
+            unittest.TestCase.__init__(self, methodName=methodName)
+
+    def setUp(self):
+        login(ANONYMOUS)
+
+    def tearDown(self):
+        logout()
+
+    def _getBugTaskByTarget(self, bug, target):
+        """Return a bug's bugtask for the given target."""
+        for bugtask in bug.bugtasks:
+            if bugtask.target == target:
+                return bugtask
+        else:
+            raise AssertionError(
+                "Didn't find a %s task on bug %s." % (
+                    target.bugtargetname, bug.id))
+
+    def setUpBugsResolvedUpstreamTests(self):
+        """Modify some bugtasks to match the resolved upstream filter."""
+        bugset = getUtility(IBugSet)
+        productset = getUtility(IProductSet)
+        firefox = productset.getByName("firefox")
+        thunderbird = productset.getByName("thunderbird")
+
+        # Mark an upstream task on bug #1 "Fix Released"
+        bug_one = bugset.get(1)
+        firefox_upstream = self._getBugTaskByTarget(bug_one, firefox)
+        self.assertEqual(
+            ServiceUsage.LAUNCHPAD,
+            firefox_upstream.product.bug_tracking_usage)
+        self.old_firefox_status = firefox_upstream.status
+        firefox_upstream.transitionToStatus(
+            BugTaskStatus.FIXRELEASED, getUtility(ILaunchBag).user)
+        self.firefox_upstream = firefox_upstream
+
+        # Mark an upstream task on bug #9 "Fix Committed"
+        bug_nine = bugset.get(9)
+        thunderbird_upstream = self._getBugTaskByTarget(bug_nine, thunderbird)
+        self.old_thunderbird_status = thunderbird_upstream.status
+        thunderbird_upstream.transitionToStatus(
+            BugTaskStatus.FIXCOMMITTED, getUtility(ILaunchBag).user)
+        self.thunderbird_upstream = thunderbird_upstream
+
+        # Add a watch to a Debian bug for bug #2, and mark the task Fix
+        # Released.
+        bug_two = bugset.get(2)
+        bugwatchset = getUtility(IBugWatchSet)
+
+        # Get a debbugs watch.
+        watch_debbugs_327452 = bugwatchset.get(9)
+        self.assertEquals(watch_debbugs_327452.bugtracker.name, "debbugs")
+        self.assertEquals(watch_debbugs_327452.remotebug, "327452")
+
+        # Associate the watch to a Fix Released task.
+        debian = getUtility(IDistributionSet).getByName("debian")
+        debian_firefox = debian.getSourcePackage("mozilla-firefox")
+        bug_two_in_debian_firefox = self._getBugTaskByTarget(
+            bug_two, debian_firefox)
+        bug_two_in_debian_firefox.bugwatch = watch_debbugs_327452
+        bug_two_in_debian_firefox.transitionToStatus(
+            BugTaskStatus.FIXRELEASED, getUtility(ILaunchBag).user)
+
+        flush_database_updates()
+
+    def tearDownBugsElsewhereTests(self):
+        """Resets the modified bugtasks to their original statuses."""
+        self.firefox_upstream.transitionToStatus(
+            self.old_firefox_status,
+            self.firefox_upstream.target.bug_supervisor)
+        self.thunderbird_upstream.transitionToStatus(
+            self.old_thunderbird_status,
+            self.firefox_upstream.target.bug_supervisor)
+        flush_database_updates()
+
+    def assertBugTaskIsPendingBugWatchElsewhere(self, bugtask):
+        """Assert the bugtask is pending a bug watch elsewhere.
+
+        Pending a bugwatch elsewhere means that at least one of the bugtask's
+        related task's target isn't using Malone, and that
+        related_bugtask.bugwatch is None.
+        """
+        non_malone_using_bugtasks = [
+            related_task for related_task in bugtask.related_tasks
+            if not related_task.target_uses_malone]
+        pending_bugwatch_bugtasks = [
+            related_bugtask for related_bugtask in non_malone_using_bugtasks
+            if related_bugtask.bugwatch is None]
+        self.assert_(
+            len(pending_bugwatch_bugtasks) > 0,
+            'Bugtask %s on %s has no related bug watches elsewhere.' % (
+                bugtask.id, bugtask.target.displayname))
+
+    def assertBugTaskIsResolvedUpstream(self, bugtask):
+        """Make sure at least one of the related upstream tasks is resolved.
+
+        "Resolved", for our purposes, means either that one of the related
+        tasks is an upstream task in FIXCOMMITTED or FIXRELEASED state, or
+        it is a task with a bugwatch, and in FIXCOMMITTED, FIXRELEASED, or
+        INVALID state.
+        """
+        resolved_upstream_states = [
+            BugTaskStatus.FIXCOMMITTED, BugTaskStatus.FIXRELEASED]
+        resolved_bugwatch_states = [
+            BugTaskStatus.FIXCOMMITTED, BugTaskStatus.FIXRELEASED,
+            BugTaskStatus.INVALID]
+
+        # Helper functions for the list comprehension below.
+        def _is_resolved_upstream_task(bugtask):
+            return (
+                IUpstreamBugTask.providedBy(bugtask) and
+                bugtask.status in resolved_upstream_states)
+
+        def _is_resolved_bugwatch_task(bugtask):
+            return (
+                bugtask.bugwatch and bugtask.status in
+                resolved_bugwatch_states)
+
+        resolved_related_tasks = [
+            related_task for related_task in bugtask.related_tasks
+            if (_is_resolved_upstream_task(related_task) or
+                _is_resolved_bugwatch_task(related_task))]
+
+        self.assert_(len(resolved_related_tasks) > 0)
+        self.assert_(
+            len(resolved_related_tasks) > 0,
+            'Bugtask %s on %s has no resolved related tasks.' % (
+                bugtask.id, bugtask.target.displayname))
+
+    def assertBugTaskIsOpenUpstream(self, bugtask):
+        """Make sure at least one of the related upstream tasks is open.
+
+        "Open", for our purposes, means either that one of the related
+        tasks is an upstream task or a task with a bugwatch which has
+        one of the states listed in open_states.
+        """
+        open_states = [
+            BugTaskStatus.NEW,
+            BugTaskStatus.INCOMPLETE,
+            BugTaskStatus.CONFIRMED,
+            BugTaskStatus.INPROGRESS,
+            BugTaskStatus.UNKNOWN]
+
+        # Helper functions for the list comprehension below.
+        def _is_open_upstream_task(bugtask):
+            return (
+                IUpstreamBugTask.providedBy(bugtask) and
+                bugtask.status in open_states)
+
+        def _is_open_bugwatch_task(bugtask):
+            return (
+                bugtask.bugwatch and bugtask.status in
+                open_states)
+
+        open_related_tasks = [
+            related_task for related_task in bugtask.related_tasks
+            if (_is_open_upstream_task(related_task) or
+                _is_open_bugwatch_task(related_task))]
+
+        self.assert_(
+            len(open_related_tasks) > 0,
+            'Bugtask %s on %s has no open related tasks.' % (
+                bugtask.id, bugtask.target.displayname))
+
+    def _hasUpstreamTask(self, bug):
+        """Does this bug have an upstream task associated with it?
+
+        Returns True if yes, otherwise False.
+        """
+        for bugtask in bug.bugtasks:
+            if IUpstreamBugTask.providedBy(bugtask):
+                return True
+        return False
+
+    def assertShouldBeShownOnNoUpstreamTaskSearch(self, bugtask):
+        """Should the bugtask be shown in the no-upstream-task search?
+
+        Returns True if yes, otherwise False.
+        """
+        self.assert_(
+            not self._hasUpstreamTask(bugtask.bug),
+            'Bugtask %s on %s has upstream tasks.' % (
+                bugtask.id, bugtask.target.displayname))
+
+
+class BugTaskSetFindExpirableBugTasksTest(unittest.TestCase):
+    """Test `BugTaskSet.findExpirableBugTasks()` behaviour."""
+    layer = DatabaseFunctionalLayer
+
+    def setUp(self):
+        """Setup the zope interaction and create expirable bugtasks."""
+        login('test@xxxxxxxxxxxxx')
+        self.user = getUtility(ILaunchBag).user
+        self.distribution = getUtility(IDistributionSet).getByName('ubuntu')
+        self.distroseries = self.distribution.getSeries('hoary')
+        self.product = getUtility(IProductSet).getByName('jokosher')
+        self.productseries = self.product.getSeries('trunk')
+        self.bugtaskset = getUtility(IBugTaskSet)
+        bugtasks = []
+        bugtasks.append(
+            create_old_bug("90 days old", 90, self.distribution))
+        bugtasks.append(
+            self.bugtaskset.createTask(
+                bug=bugtasks[-1].bug, owner=self.user,
+                distroseries=self.distroseries))
+        bugtasks.append(
+            create_old_bug("90 days old", 90, self.product))
+        bugtasks.append(
+            self.bugtaskset.createTask(
+                bug=bugtasks[-1].bug, owner=self.user,
+                productseries=self.productseries))
+        sync_bugtasks(bugtasks)
+
+    def tearDown(self):
+        logout()
+
+    def testSupportedTargetParam(self):
+        """The target param supports a limited set of BugTargets.
+
+        Four BugTarget types may be passed as the target argument:
+        Distribution, DistroSeries, Product, ProductSeries.
+        """
+        supported_targets = [self.distribution, self.distroseries,
+                             self.product, self.productseries]
+        for target in supported_targets:
+            expirable_bugtasks = self.bugtaskset.findExpirableBugTasks(
+                0, self.user, target=target)
+            self.assertNotEqual(expirable_bugtasks.count(), 0,
+                 "%s has %d expirable bugtasks." %
+                 (self.distroseries, expirable_bugtasks.count()))
+
+    def testUnsupportedBugTargetParam(self):
+        """Test that unsupported targets raise errors.
+
+        Three BugTarget types are not supported because the UI does not
+        provide bug-index to link to the 'bugs that can expire' page.
+        ProjectGroup, SourcePackage, and DistributionSourcePackage will
+        raise a NotImplementedError.
+
+        Passing an unknown bugtarget type will raise an AssertionError.
+        """
+        project = getUtility(IProjectGroupSet).getByName('mozilla')
+        distributionsourcepackage = self.distribution.getSourcePackage(
+            'mozilla-firefox')
+        sourcepackage = self.distroseries.getSourcePackage(
+            'mozilla-firefox')
+        unsupported_targets = [project, distributionsourcepackage,
+                               sourcepackage]
+        for target in unsupported_targets:
+            self.assertRaises(
+                NotImplementedError, self.bugtaskset.findExpirableBugTasks,
+                0, self.user, target=target)
+
+        # Objects that are not a known BugTarget type raise an AssertionError.
+        self.assertRaises(
+            AssertionError, self.bugtaskset.findExpirableBugTasks,
+            0, self.user, target=[])
+
+
+class BugTaskSetTest(unittest.TestCase):
+    """Test `BugTaskSet` methods."""
+    layer = DatabaseFunctionalLayer
+
+    def setUp(self):
+        login(ANONYMOUS)
+
+    def test_getBugTasks(self):
+        """ IBugTaskSet.getBugTasks() returns a dictionary mapping the given
+        bugs to their bugtasks. It does that in a single query, to avoid
+        hitting the DB again when getting the bugs' tasks.
+        """
+        login('no-priv@xxxxxxxxxxxxx')
+        factory = LaunchpadObjectFactory()
+        bug1 = factory.makeBug()
+        factory.makeBugTask(bug1)
+        bug2 = factory.makeBug()
+        factory.makeBugTask(bug2)
+        factory.makeBugTask(bug2)
+
+        bugs_and_tasks = getUtility(IBugTaskSet).getBugTasks(
+            [bug1.id, bug2.id])
+        # The bugtasks returned by getBugTasks() are exactly the same as the
+        # ones returned by bug.bugtasks, obviously.
+        self.failUnlessEqual(
+            set(bugs_and_tasks[bug1]).difference(bug1.bugtasks),
+            set([]))
+        self.failUnlessEqual(
+            set(bugs_and_tasks[bug2]).difference(bug2.bugtasks),
+            set([]))
+
+    def test_getBugTasks_with_empty_list(self):
+        # When given an empty list of bug IDs, getBugTasks() will return an
+        # empty dictionary.
+        bugs_and_tasks = getUtility(IBugTaskSet).getBugTasks([])
+        self.failUnlessEqual(bugs_and_tasks, {})
+
+
+class TestBugTaskStatuses(TestCase):
+
+    def test_open_and_resolved_statuses(self):
+        """
+        There are constants that are used to define which statuses are for
+        resolved bugs (`RESOLVED_BUGTASK_STATUSES`), and which are for
+        unresolved bugs (`UNRESOLVED_BUGTASK_STATUSES`). The two constants
+        include all statuses defined in BugTaskStatus, except for Unknown.
+        """
+        self.assertNotIn(BugTaskStatus.UNKNOWN, RESOLVED_BUGTASK_STATUSES)
+        self.assertNotIn(BugTaskStatus.UNKNOWN, UNRESOLVED_BUGTASK_STATUSES)
+
+
+class TestGetStructuralSubscribers(TestCaseWithFactory):
+
+    layer = DatabaseFunctionalLayer
+
+    def make_product_with_bug(self):
+        product = self.factory.makeProduct()
+        bug = self.factory.makeBug(product=product)
+        return product, bug
+
+    def getStructuralSubscribers(self, bugtasks, *args, **kwargs):
+        # Call IBugTaskSet.getStructuralSubscribers() and check that the
+        # result is security proxied.
+        result = getUtility(IBugTaskSet).getStructuralSubscribers(
+            bugtasks, *args, **kwargs)
+        self.assertTrue(is_security_proxied_or_harmless(result))
+        return result
+
+    def test_getStructuralSubscribers_no_subscribers(self):
+        # If there are no subscribers for any of the bug's targets then no
+        # subscribers will be returned by getStructuralSubscribers().
+        product, bug = self.make_product_with_bug()
+        subscribers = self.getStructuralSubscribers(bug.bugtasks)
+        self.assertIsInstance(subscribers, ResultSet)
+        self.assertEqual([], list(subscribers))
+
+    def test_getStructuralSubscribers_single_target(self):
+        # Subscribers for any of the bug's targets are returned.
+        subscriber = self.factory.makePerson()
+        login_person(subscriber)
+        product, bug = self.make_product_with_bug()
+        product.addBugSubscription(subscriber, subscriber)
+        self.assertEqual(
+            [subscriber], list(
+                self.getStructuralSubscribers(bug.bugtasks)))
+
+    def test_getStructuralSubscribers_multiple_targets(self):
+        # Subscribers for any of the bug's targets are returned.
+        actor = self.factory.makePerson()
+        login_person(actor)
+
+        subscriber1 = self.factory.makePerson()
+        subscriber2 = self.factory.makePerson()
+
+        product1 = self.factory.makeProduct(owner=actor)
+        product1.addBugSubscription(subscriber1, subscriber1)
+        product2 = self.factory.makeProduct(owner=actor)
+        product2.addBugSubscription(subscriber2, subscriber2)
+
+        bug = self.factory.makeBug(product=product1)
+        bug.addTask(actor, product2)
+
+        subscribers = self.getStructuralSubscribers(bug.bugtasks)
+        self.assertIsInstance(subscribers, ResultSet)
+        self.assertEqual(set([subscriber1, subscriber2]), set(subscribers))
+
+    def test_getStructuralSubscribers_recipients(self):
+        # If provided, getStructuralSubscribers() calls the appropriate
+        # methods on a BugNotificationRecipients object.
+        subscriber = self.factory.makePerson()
+        login_person(subscriber)
+        product, bug = self.make_product_with_bug()
+        product.addBugSubscription(subscriber, subscriber)
+        recipients = BugNotificationRecipients()
+        subscribers = self.getStructuralSubscribers(
+            bug.bugtasks, recipients=recipients)
+        # The return value is a list only when populating recipients.
+        self.assertIsInstance(subscribers, list)
+        self.assertEqual([subscriber], recipients.getRecipients())
+        reason, header = recipients.getReason(subscriber)
+        self.assertThat(
+            reason, StartsWith(
+                u"You received this bug notification because "
+                u"you are subscribed to "))
+        self.assertThat(header, StartsWith(u"Subscriber "))
+
+    def test_getStructuralSubscribers_level(self):
+        # getStructuralSubscribers() respects the given level.
+        subscriber = self.factory.makePerson()
+        login_person(subscriber)
+        product, bug = self.make_product_with_bug()
+        subscription = product.addBugSubscription(subscriber, subscriber)
+        subscription.bug_notification_level = BugNotificationLevel.METADATA
+        self.assertEqual(
+            [subscriber], list(
+                self.getStructuralSubscribers(
+                    bug.bugtasks, level=BugNotificationLevel.METADATA)))
+        subscription.bug_notification_level = BugNotificationLevel.METADATA
+        self.assertEqual(
+            [], list(
+                self.getStructuralSubscribers(
+                    bug.bugtasks, level=BugNotificationLevel.COMMENTS)))
+
 
 def test_suite():
     suite = unittest.TestSuite()

=== renamed file 'lib/lp/bugs/tests/test_bugtask_status.py' => 'lib/lp/bugs/model/tests/test_bugtask_status.py'
--- lib/lp/bugs/tests/test_bugtask_status.py	2010-10-18 17:29:49 +0000
+++ lib/lp/bugs/model/tests/test_bugtask_status.py	2010-10-27 02:13:03 +0000
@@ -73,6 +73,15 @@
                 UserCannotEditBugTaskStatus, self.task.transitionToStatus,
                 BugTaskStatus.CONFIRMED, self.user)
 
+    def test_user_cannot_unset_fix_released_status(self):
+        # A regular user should not be able to transition a bug away
+        # from Fix Released.
+        removeSecurityProxy(self.task).status = BugTaskStatus.FIXRELEASED
+        with person_logged_in(self.user):
+            self.assertRaises(
+                UserCannotEditBugTaskStatus, self.task.transitionToStatus,
+                BugTaskStatus.FIXRELEASED, self.user)
+
     def test_user_canTransitionToStatus(self):
         # Regular user cannot transition to BUG_SUPERVISOR_BUGTASK_STATUSES,
         # but can transition to any other status.
@@ -129,6 +138,15 @@
                 BugTaskStatus.NEW, self.user),
             False)
 
+    def test_user_canTransitionToStatus_from_fixreleased(self):
+        # A regular user cannot transition away from Fix Released,
+        # so canTransitionToStatus should return False.
+        removeSecurityProxy(self.task).status = BugTaskStatus.FIXRELEASED
+        self.assertEqual(
+            self.task.canTransitionToStatus(
+                BugTaskStatus.NEW, self.user),
+            False)
+
 
 class TestBugTaskStatusTransitionForPrivilegedUserBase:
     """Base class used to test privileged users and status transitions."""
@@ -188,6 +206,13 @@
             self.task.transitionToStatus(BugTaskStatus.CONFIRMED, self.person)
             self.assertEqual(self.task.status, BugTaskStatus.CONFIRMED)
 
+    def test_privileged_user_can_unset_wont_fix_released(self):
+        # Privileged users can transition away from Fix Released.
+        removeSecurityProxy(self.task).status = BugTaskStatus.FIXRELEASED
+        with person_logged_in(self.person):
+            self.task.transitionToStatus(BugTaskStatus.CONFIRMED, self.person)
+            self.assertEqual(self.task.status, BugTaskStatus.CONFIRMED)
+
     def test_privileged_user_canTransitionToStatus(self):
         # Privileged users (like owner or bug supervisor) should
         # be able to set any status, so canTransitionToStatus should
@@ -246,6 +271,15 @@
                 BugTaskStatus.NEW, self.person),
             True)
 
+    def test_privileged_user_canTransitionToStatus_from_fixreleased(self):
+        # A privileged user can transition away from Fix Released, so
+        # canTransitionToStatus should return True.
+        removeSecurityProxy(self.task).status = BugTaskStatus.FIXRELEASED
+        self.assertEqual(
+            self.task.canTransitionToStatus(
+                BugTaskStatus.NEW, self.person),
+            True)
+
 
 class TestBugTaskStatusTransitionOwnerPerson(
     TestBugTaskStatusTransitionForPrivilegedUserBase, TestCaseWithFactory):

=== modified file 'lib/lp/bugs/scripts/checkwatches/base.py'
--- lib/lp/bugs/scripts/checkwatches/base.py	2010-08-20 20:31:18 +0000
+++ lib/lp/bugs/scripts/checkwatches/base.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Common classes and functions for the checkwatches system."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'WorkingBase',

=== modified file 'lib/lp/bugs/scripts/checkwatches/bugwatchupdater.py'
--- lib/lp/bugs/scripts/checkwatches/bugwatchupdater.py	2010-10-03 15:30:06 +0000
+++ lib/lp/bugs/scripts/checkwatches/bugwatchupdater.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Classes and logic for the checkwatches BugWatchUpdater."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'BugWatchUpdater',

=== modified file 'lib/lp/bugs/scripts/checkwatches/core.py'
--- lib/lp/bugs/scripts/checkwatches/core.py	2010-10-03 15:30:06 +0000
+++ lib/lp/bugs/scripts/checkwatches/core.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Classes and logic for the checkwatches cronscript."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'BaseScheduler',

=== modified file 'lib/lp/bugs/scripts/checkwatches/remotebugupdater.py'
--- lib/lp/bugs/scripts/checkwatches/remotebugupdater.py	2010-08-20 20:31:18 +0000
+++ lib/lp/bugs/scripts/checkwatches/remotebugupdater.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Classes and logic for the remote bug updater."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'RemoteBugUpdater',

=== modified file 'lib/lp/bugs/scripts/checkwatches/tests/test_base.py'
--- lib/lp/bugs/scripts/checkwatches/tests/test_base.py	2010-10-04 19:50:45 +0000
+++ lib/lp/bugs/scripts/checkwatches/tests/test_base.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for the `base` module."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from contextlib import contextmanager

=== modified file 'lib/lp/bugs/scripts/checkwatches/tests/test_core.py'
--- lib/lp/bugs/scripts/checkwatches/tests/test_core.py	2010-10-04 19:50:45 +0000
+++ lib/lp/bugs/scripts/checkwatches/tests/test_core.py	2010-10-27 02:13:03 +0000
@@ -2,8 +2,6 @@
 # GNU Affero General Public License version 3 (see the file LICENSE).
 """Checkwatches unit tests."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import datetime
@@ -15,20 +13,20 @@
 
 from canonical.config import config
 from canonical.launchpad.ftests import login
-from canonical.launchpad.interfaces import (
-    BugTaskStatus,
-    BugTrackerType,
-    IBugSet,
-    IBugTaskSet,
-    ILaunchpadCelebrities,
-    IPersonSet,
-    IProductSet,
-    IQuestionSet,
-    )
+from canonical.launchpad.interfaces.launchpad import ILaunchpadCelebrities
 from canonical.launchpad.scripts.logger import QuietFakeLogger
 from canonical.testing.layers import LaunchpadZopelessLayer
+from lp.answers.interfaces.questioncollection import IQuestionSet
 from lp.bugs.externalbugtracker.bugzilla import BugzillaAPI
-from lp.bugs.interfaces.bugtracker import IBugTrackerSet
+from lp.bugs.interfaces.bug import IBugSet
+from lp.bugs.interfaces.bugtask import (
+    BugTaskStatus,
+    IBugTaskSet,
+    )
+from lp.bugs.interfaces.bugtracker import (
+    BugTrackerType,
+    IBugTrackerSet,
+    )
 from lp.bugs.scripts import checkwatches
 from lp.bugs.scripts.checkwatches.base import (
     CheckWatchesErrorUtility,
@@ -45,6 +43,8 @@
     TestBugzillaAPIXMLRPCTransport,
     TestExternalBugTracker,
     )
+from lp.registry.interfaces.person import IPersonSet
+from lp.registry.interfaces.product import IProductSet
 from lp.testing import (
     TestCaseWithFactory,
     ZopeTestInSubProcess,
@@ -53,6 +53,7 @@
 
 class BugzillaAPIWithoutProducts(BugzillaAPI):
     """None of the remote bugs have products."""
+
     def getProductsForRemoteBugs(self, remote_bug_ids):
         return {}
 
@@ -210,7 +211,10 @@
                 "Unexpected last OOPS value: %s" % last_oops.value)
 
     def test_suggest_batch_size(self):
-        class RemoteSystem: pass
+
+        class RemoteSystem:
+            pass
+
         remote_system = RemoteSystem()
         # When the batch_size is None, suggest_batch_size() will set
         # it accordingly.
@@ -298,9 +302,11 @@
 class TestSchedulerBase:
 
     def test_args_and_kwargs(self):
+
         def func(name, aptitude):
             self.failUnlessEqual("Robin Hood", name)
             self.failUnlessEqual("Riding through the glen", aptitude)
+
         # Positional args specified when adding a job are passed to
         # the job function at run time.
         self.scheduler.schedule(
@@ -454,14 +460,12 @@
             ["getRemoteStatus(bug_id=u'butterscotch-1')",
              "getRemoteStatus(bug_id=u'butterscotch-2')",
              "getRemoteStatus(bug_id=u'butterscotch-3')"],
-            output_file.output['butterscotch']
-            )
+            output_file.output['butterscotch'])
         self.assertEqual(
             ["getRemoteStatus(bug_id=u'strawberry-1')",
              "getRemoteStatus(bug_id=u'strawberry-2')",
              "getRemoteStatus(bug_id=u'strawberry-3')"],
-            output_file.output['strawberry']
-            )
+            output_file.output['strawberry'])
 
 
 def test_suite():

=== modified file 'lib/lp/bugs/stories/bugs/xx-bug-text-pages.txt'
--- lib/lp/bugs/stories/bugs/xx-bug-text-pages.txt	2010-10-09 16:36:22 +0000
+++ lib/lp/bugs/stories/bugs/xx-bug-text-pages.txt	2010-10-27 02:13:03 +0000
@@ -28,7 +28,8 @@
     ...      "bug-patch.diff", is_patch=True, content_type='text/plain',
     ...      description="a patch")
 
-Next, we'll cycle through all statuses so the dates are present:
+Next, we'll cycle through all statuses so the dates are present (to
+toggle away from Fix Released we must be the target owner):
 
     >>> from lp.bugs.interfaces.bugtask import BugTaskStatus
     >>> t0 = bug.bugtasks[0]
@@ -36,7 +37,7 @@
     >>> t0.transitionToStatus(BugTaskStatus.CONFIRMED, mark)
     >>> t0.transitionToStatus(BugTaskStatus.INPROGRESS, mark)
     >>> t0.transitionToStatus(BugTaskStatus.FIXRELEASED, mark)
-    >>> t0.transitionToStatus(BugTaskStatus.NEW, mark)
+    >>> t0.transitionToStatus(BugTaskStatus.NEW, t0.target.owner)
     >>> t0.transitionToStatus(BugTaskStatus.FIXRELEASED, mark)
     >>> logout()
     >>> flush_database_updates()

=== modified file 'lib/lp/bugs/stories/bugs/xx-bugs-advanced-search-upstream-status.txt'
--- lib/lp/bugs/stories/bugs/xx-bugs-advanced-search-upstream-status.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/bugs/stories/bugs/xx-bugs-advanced-search-upstream-status.txt	2010-10-27 02:13:03 +0000
@@ -17,7 +17,7 @@
 
 Now if we go to the advanced search and choose to list only the bugs
 needing a bug watch, only the bugs with tasks in other contexts that
-don't use Launchpad Bugs are shown, if at least one of those contexts 
+don't use Launchpad Bugs are shown, if at least one of those contexts
 doesn't have a bug watch.
 
     # XXX: Bjorn Tillenius 2006-07-04 bug=51853:
@@ -97,7 +97,7 @@
 demonstrate.
 
     >>> from canonical.launchpad.ftests import login, logout
-    >>> from lp.bugs.tests.test_bugtask_1 import (
+    >>> from lp.bugs.model.tests.test_bugtask import (
     ...     BugTaskSearchBugsElsewhereTest)
     >>> test_helper = BugTaskSearchBugsElsewhereTest(helper_only=True)
     >>> login('test@xxxxxxxxxxxxx')
@@ -181,8 +181,8 @@
        linux-source-2.6.15 Medium New
     2 Blackhole Trash folder
       &mdash; Medium New
- 
-The user opens a bookmark for "upstream status: Show only bugs that need 
+
+The user opens a bookmark for "upstream status: Show only bugs that need
 to be forwarded to an upstream bug tracker".
 
     >>> bookmark_params['field.status_upstream'] = 'pending_bugwatch'
@@ -225,7 +225,7 @@
     ...         bookmark_params, True))
     Traceback (most recent call last):
     ...
-    UnexpectedFormData: Unexpected value for field 'status_upstream'. 
+    UnexpectedFormData: Unexpected value for field 'status_upstream'.
     Perhaps your bookmarks are out of date or you changed the URL by hand?
 
 

=== modified file 'lib/lp/bugs/stories/bugs/xx-incomplete-bugs.txt'
--- lib/lp/bugs/stories/bugs/xx-incomplete-bugs.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/bugs/stories/bugs/xx-incomplete-bugs.txt	2010-10-27 02:13:03 +0000
@@ -234,6 +234,16 @@
 cannot expire. No Privileges Person sets a Debian bug to Incomplete,
 and does not see the expiration notice.
 
+In order for this to work, the bug cannot be FIXRELEASED, which
+it is by default.  So we set the bug back to NEW.
+
+    >>> from lp.bugs.interfaces.bugtask import BugTaskStatus
+    >>> login('foo.bar@xxxxxxxxxxxxx')
+    >>> bug_8 = getUtility(IBugSet).get(8)
+    >>> bug_8.bugtasks[0].transitionToStatus(
+    ...     BugTaskStatus.NEW, bug_8.bugtasks[0].distribution.owner)
+    >>> logout()
+
     >>> user_browser.open(
     ...     'http://bugs.launchpad.dev/debian/+source/mozilla-firefox/+bug/8')
     >>> user_browser.getControl('Status').value = ['Incomplete']

=== modified file 'lib/lp/bugs/stories/distribution/xx-distribution-upstream-bug-report.txt'
--- lib/lp/bugs/stories/distribution/xx-distribution-upstream-bug-report.txt	2010-10-09 16:36:22 +0000
+++ lib/lp/bugs/stories/distribution/xx-distribution-upstream-bug-report.txt	2010-10-27 02:13:03 +0000
@@ -71,7 +71,7 @@
 broken percentage when comparing with bugs marked upstream. Perhaps
 we'd solve this by separating the watched column into natively watched
 and remote watched, but I don't know how to fix this right now.  See
-https://bugs.edge.launchpad.net/malone/+bug/188020
+https://bugs.launchpad.net/malone/+bug/188020
     -- kiko, 2008-02-01
 
 

=== modified file 'lib/lp/bugs/stories/webservice/xx-bug.txt'
--- lib/lp/bugs/stories/webservice/xx-bug.txt	2010-10-21 01:42:14 +0000
+++ lib/lp/bugs/stories/webservice/xx-bug.txt	2010-10-27 02:13:03 +0000
@@ -1486,11 +1486,38 @@
 
 It can also be used to find bugs modified since a certain date.
 
-    >>> pprint_collection(webservice.named_get(
-    ...     '/ubuntu', 'searchTasks',
-    ...     modified_since=u'2011-01-01T00:00:00+00:00').jsonBody())
-    start: 0
-    total_size: 0
+    >>> from datetime import timedelta
+    >>> from lp.testing.sampledata import ADMIN_EMAIL
+    >>> login(ADMIN_EMAIL)
+    >>> target = factory.makeProduct()
+    >>> bug = factory.makeBug(product=target)
+    >>> bug = removeSecurityProxy(bug)
+    >>> date = bug.date_last_updated - timedelta(days=6)
+    >>> logout()
+
+    >>> pprint_collection(webservice.named_get(
+    ...     '/%s' % target.name, 'searchTasks',
+    ...     modified_since=u'%s' % date ).jsonBody())
+    start: 0
+    total_size: 1
+    ...
+    ---
+
+It can also be used to find bug tasks created since a certain date.
+
+    >>> from lp.bugs.interfaces.bugtarget import IBugTarget
+    >>> login(ADMIN_EMAIL)
+    >>> target = IBugTarget(factory.makeProduct())
+    >>> task = factory.makeBugTask(target=target)
+    >>> date = task.datecreated - timedelta(days=8)
+    >>> logout()
+
+    >>> pprint_collection(webservice.named_get(
+    ...     '/%s' % target.name, 'searchTasks',
+    ...     created_since=u'%s' % date).jsonBody())
+    start: 0
+    total_size: 1
+    ...
     ---
 
 It is possible to search for bugs targeted to a milestone within a
@@ -1772,7 +1799,7 @@
     http://api.launchpad.dev/beta/firefox/+bug/1
     http://api.launchpad.dev/beta/firefox/+bug/16
     http://api.launchpad.dev/beta/firefox/+bug/20
-    http://api.launchpad.dev/beta/firefox/+bug/22
+    http://api.launchpad.dev/beta/firefox/+bug/24
 
 
 Affected users

=== modified file 'lib/lp/bugs/subscribers/bug.py'
--- lib/lp/bugs/subscribers/bug.py	2010-08-23 09:25:17 +0000
+++ lib/lp/bugs/subscribers/bug.py	2010-10-27 02:13:03 +0000
@@ -19,6 +19,8 @@
 import datetime
 from operator import attrgetter
 
+from zope.component import getUtility
+
 from canonical.config import config
 from canonical.database.sqlbase import block_implicit_flushes
 from canonical.launchpad.helpers import get_contact_email_addresses
@@ -34,14 +36,12 @@
     )
 from lp.bugs.adapters.bugdelta import BugDelta
 from lp.bugs.interfaces.bugchange import IBugChange
+from lp.bugs.interfaces.bugtask import IBugTaskSet
 from lp.bugs.mail.bugnotificationbuilder import BugNotificationBuilder
 from lp.bugs.mail.bugnotificationrecipients import BugNotificationRecipients
 from lp.bugs.mail.newbug import generate_bug_add_email
 from lp.registry.enum import BugNotificationLevel
 from lp.registry.interfaces.person import IPerson
-from lp.registry.interfaces.structuralsubscription import (
-    IStructuralSubscriptionTarget,
-    )
 
 
 @block_implicit_flushes
@@ -179,15 +179,10 @@
         if recipients is not None:
             recipients.addAssignee(bugtask.assignee)
 
-    if IStructuralSubscriptionTarget.providedBy(bugtask.target):
-        also_notified_subscribers.update(
-            bugtask.target.getBugNotificationsRecipients(
-                recipients, level=level))
-
-    if bugtask.milestone is not None:
-        also_notified_subscribers.update(
-            bugtask.milestone.getBugNotificationsRecipients(
-                recipients, level=level))
+    # Get structural subscribers.
+    also_notified_subscribers.update(
+        getUtility(IBugTaskSet).getStructuralSubscribers(
+            [bugtask], recipients, level))
 
     # If the target's bug supervisor isn't set,
     # we add the owner as a subscriber.

=== modified file 'lib/lp/bugs/tests/bugs-emailinterface.txt'
--- lib/lp/bugs/tests/bugs-emailinterface.txt	2010-10-21 01:42:14 +0000
+++ lib/lp/bugs/tests/bugs-emailinterface.txt	2010-10-27 02:13:03 +0000
@@ -1620,20 +1620,20 @@
 edited:
 
     >>> login('foo.bar@xxxxxxxxxxxxx')
-    >>> bug_eigth = getUtility(IBugSet).get(8)
-    >>> len(bug_eigth.bugtasks)
+    >>> bug_ten = getUtility(IBugSet).get(10)
+    >>> len(bug_ten.bugtasks)
     1
-    >>> submit_commands(bug_eigth, 'status confirmed')
-    >>> mozilla_task = bug_eigth.bugtasks[0]
-    >>> print mozilla_task.status.name
+    >>> submit_commands(bug_ten, 'status confirmed')
+    >>> linux_task = bug_ten.bugtasks[0]
+    >>> print linux_task.status.name
     CONFIRMED
 
     >>> bug_notification = BugNotification.selectFirst(orderBy='-id')
     >>> print bug_notification.bug.id
-    8
+    10
     >>> print bug_notification.message.text_contents
-    ** Changed in: mozilla-firefox (Debian)
-        Status: Fix Released => Confirmed
+    ** Changed in: linux-source-2.6.15 (Ubuntu)
+        Status: New => Confirmed
 
 If the bug has more than one bug task, we try to guess which bug task
 the user wanted to edit. We apply the following heuristics for choosing

=== modified file 'lib/lp/bugs/tests/test_bugbranch.py'
--- lib/lp/bugs/tests/test_bugbranch.py	2010-10-04 19:50:45 +0000
+++ lib/lp/bugs/tests/test_bugbranch.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 """Tests for bug-branch linking from the bugs side."""
 
 __metaclass__ = type

=== removed file 'lib/lp/bugs/tests/test_bugtask_0.py'
--- lib/lp/bugs/tests/test_bugtask_0.py	2010-10-03 15:30:06 +0000
+++ lib/lp/bugs/tests/test_bugtask_0.py	1970-01-01 00:00:00 +0000
@@ -1,36 +0,0 @@
-# Copyright 2009 Canonical Ltd.  This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""Tests for bugtask.py."""
-
-__metaclass__ = type
-
-from doctest import (
-    DocTestSuite,
-    ELLIPSIS,
-    NORMALIZE_WHITESPACE,
-    REPORT_NDIFF,
-    )
-
-
-def test_open_and_resolved_statuses(self):
-    """
-    There are constants that are used to define which statuses are for
-    resolved bugs (RESOLVED_BUGTASK_STATUSES), and which are for
-    unresolved bugs (UNRESOLVED_BUGTASK_STATUSES). The two constants
-    include all statuses defined in BugTaskStatus, except for Unknown.
-
-        >>> from canonical.launchpad.interfaces import (
-        ...     RESOLVED_BUGTASK_STATUSES, UNRESOLVED_BUGTASK_STATUSES)
-        >>> from canonical.launchpad.interfaces import BugTaskStatus
-        >>> not_included_status = set(BugTaskStatus.items).difference(
-        ...     RESOLVED_BUGTASK_STATUSES + UNRESOLVED_BUGTASK_STATUSES)
-        >>> [status.name for status in not_included_status]
-        ['UNKNOWN']
-    """
-
-def test_suite():
-    suite = DocTestSuite(
-        optionflags=REPORT_NDIFF|NORMALIZE_WHITESPACE|ELLIPSIS)
-    return suite
-

=== removed file 'lib/lp/bugs/tests/test_bugtask_1.py'
--- lib/lp/bugs/tests/test_bugtask_1.py	2010-10-04 19:50:45 +0000
+++ lib/lp/bugs/tests/test_bugtask_1.py	1970-01-01 00:00:00 +0000
@@ -1,343 +0,0 @@
-# Copyright 2009-2010 Canonical Ltd.  This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""Bugtask related tests that are too complex to be readable as doctests."""
-
-__metaclass__ = type
-
-import unittest
-
-from zope.component import getUtility
-
-from canonical.database.sqlbase import flush_database_updates
-from canonical.launchpad.ftests import (
-    ANONYMOUS,
-    login,
-    logout,
-    )
-from canonical.launchpad.webapp.interfaces import ILaunchBag
-from canonical.testing.layers import DatabaseFunctionalLayer
-from lp.app.enums import ServiceUsage
-from lp.bugs.interfaces.bug import IBugSet
-from lp.bugs.interfaces.bugtask import (
-    BugTaskStatus,
-    IBugTaskSet,
-    IUpstreamBugTask,
-    )
-from lp.bugs.interfaces.bugwatch import IBugWatchSet
-from lp.bugs.tests.bug import (
-    create_old_bug,
-    sync_bugtasks,
-    )
-from lp.registry.interfaces.distribution import IDistributionSet
-from lp.registry.interfaces.product import IProductSet
-from lp.registry.interfaces.projectgroup import IProjectGroupSet
-from lp.testing.factory import LaunchpadObjectFactory
-
-
-class BugTaskSearchBugsElsewhereTest(unittest.TestCase):
-    """Tests for searching bugs filtering on related bug tasks.
-
-    It also acts as a helper class, which makes related doctests more
-    readable, since they can use methods from this class.
-    """
-    layer = DatabaseFunctionalLayer
-
-    def __init__(self, methodName='runTest', helper_only=False):
-        """If helper_only is True, set up it only as a helper class."""
-        if not helper_only:
-            unittest.TestCase.__init__(self, methodName=methodName)
-
-    def setUp(self):
-        login(ANONYMOUS)
-
-    def tearDown(self):
-        logout()
-
-    def _getBugTaskByTarget(self, bug, target):
-        """Return a bug's bugtask for the given target."""
-        for bugtask in bug.bugtasks:
-            if bugtask.target == target:
-                return bugtask
-        else:
-            raise AssertionError(
-                "Didn't find a %s task on bug %s." % (
-                    target.bugtargetname, bug.id))
-
-    def setUpBugsResolvedUpstreamTests(self):
-        """Modify some bugtasks to match the resolved upstream filter."""
-        bugset = getUtility(IBugSet)
-        productset = getUtility(IProductSet)
-        firefox = productset.getByName("firefox")
-        thunderbird = productset.getByName("thunderbird")
-
-        # Mark an upstream task on bug #1 "Fix Released"
-        bug_one = bugset.get(1)
-        firefox_upstream = self._getBugTaskByTarget(bug_one, firefox)
-        self.assertEqual(
-            ServiceUsage.LAUNCHPAD,
-            firefox_upstream.product.bug_tracking_usage)
-        self.old_firefox_status = firefox_upstream.status
-        firefox_upstream.transitionToStatus(
-            BugTaskStatus.FIXRELEASED, getUtility(ILaunchBag).user)
-        self.firefox_upstream = firefox_upstream
-
-        # Mark an upstream task on bug #9 "Fix Committed"
-        bug_nine = bugset.get(9)
-        thunderbird_upstream = self._getBugTaskByTarget(bug_nine, thunderbird)
-        self.old_thunderbird_status = thunderbird_upstream.status
-        thunderbird_upstream.transitionToStatus(
-            BugTaskStatus.FIXCOMMITTED, getUtility(ILaunchBag).user)
-        self.thunderbird_upstream = thunderbird_upstream
-
-        # Add a watch to a Debian bug for bug #2, and mark the task Fix
-        # Released.
-        bug_two = bugset.get(2)
-        current_user = getUtility(ILaunchBag).user
-        bugtaskset = getUtility(IBugTaskSet)
-        bugwatchset = getUtility(IBugWatchSet)
-
-        # Get a debbugs watch.
-        watch_debbugs_327452 = bugwatchset.get(9)
-        self.assertEquals(watch_debbugs_327452.bugtracker.name, "debbugs")
-        self.assertEquals(watch_debbugs_327452.remotebug, "327452")
-
-        # Associate the watch to a Fix Released task.
-        debian = getUtility(IDistributionSet).getByName("debian")
-        debian_firefox = debian.getSourcePackage("mozilla-firefox")
-        bug_two_in_debian_firefox = self._getBugTaskByTarget(
-            bug_two, debian_firefox)
-        bug_two_in_debian_firefox.bugwatch = watch_debbugs_327452
-        bug_two_in_debian_firefox.transitionToStatus(
-            BugTaskStatus.FIXRELEASED, getUtility(ILaunchBag).user)
-
-        flush_database_updates()
-
-    def tearDownBugsElsewhereTests(self):
-        """Resets the modified bugtasks to their original statuses."""
-        self.firefox_upstream.transitionToStatus(
-            self.old_firefox_status, getUtility(ILaunchBag).user)
-        self.thunderbird_upstream.transitionToStatus(
-            self.old_thunderbird_status, getUtility(ILaunchBag).user)
-        flush_database_updates()
-
-    def assertBugTaskIsPendingBugWatchElsewhere(self, bugtask):
-        """Assert the bugtask is pending a bug watch elsewhere.
-
-        Pending a bugwatch elsewhere means that at least one of the bugtask's
-        related task's target isn't using Malone, and that
-        related_bugtask.bugwatch is None.
-        """
-        non_malone_using_bugtasks = [
-            related_task for related_task in bugtask.related_tasks
-            if not related_task.target_uses_malone]
-        pending_bugwatch_bugtasks = [
-            related_bugtask for related_bugtask in non_malone_using_bugtasks
-            if related_bugtask.bugwatch is None]
-        self.assert_(
-            len(pending_bugwatch_bugtasks) > 0,
-            'Bugtask %s on %s has no related bug watches elsewhere.' % (
-                bugtask.id, bugtask.target.displayname))
-
-    def assertBugTaskIsResolvedUpstream(self, bugtask):
-        """Make sure at least one of the related upstream tasks is resolved.
-
-        "Resolved", for our purposes, means either that one of the related
-        tasks is an upstream task in FIXCOMMITTED or FIXRELEASED state, or
-        it is a task with a bugwatch, and in FIXCOMMITTED, FIXRELEASED, or
-        INVALID state.
-        """
-        resolved_upstream_states = [
-            BugTaskStatus.FIXCOMMITTED, BugTaskStatus.FIXRELEASED]
-        resolved_bugwatch_states = [
-            BugTaskStatus.FIXCOMMITTED, BugTaskStatus.FIXRELEASED,
-            BugTaskStatus.INVALID]
-
-        # Helper functions for the list comprehension below.
-        def _is_resolved_upstream_task(bugtask):
-            return (
-                IUpstreamBugTask.providedBy(bugtask) and
-                bugtask.status in resolved_upstream_states)
-
-        def _is_resolved_bugwatch_task(bugtask):
-            return (
-                bugtask.bugwatch and bugtask.status in
-                resolved_bugwatch_states)
-
-        resolved_related_tasks = [
-            related_task for related_task in bugtask.related_tasks
-            if (_is_resolved_upstream_task(related_task) or
-                _is_resolved_bugwatch_task(related_task))]
-
-        self.assert_(len(resolved_related_tasks) > 0)
-        self.assert_(
-            len(resolved_related_tasks) > 0,
-            'Bugtask %s on %s has no resolved related tasks.' % (
-                bugtask.id, bugtask.target.displayname))
-
-    def assertBugTaskIsOpenUpstream(self, bugtask):
-        """Make sure at least one of the related upstream tasks is open.
-
-        "Open", for our purposes, means either that one of the related
-        tasks is an upstream task or a task with a bugwatch which has
-        one of the states listed in open_states.
-        """
-        open_states = [
-            BugTaskStatus.NEW,
-            BugTaskStatus.INCOMPLETE,
-            BugTaskStatus.CONFIRMED,
-            BugTaskStatus.INPROGRESS,
-            BugTaskStatus.UNKNOWN]
-
-        # Helper functions for the list comprehension below.
-        def _is_open_upstream_task(bugtask):
-            return (
-                IUpstreamBugTask.providedBy(bugtask) and
-                bugtask.status in open_states)
-
-        def _is_open_bugwatch_task(bugtask):
-            return (
-                bugtask.bugwatch and bugtask.status in
-                open_states)
-
-        open_related_tasks = [
-            related_task for related_task in bugtask.related_tasks
-            if (_is_open_upstream_task(related_task) or
-                _is_open_bugwatch_task(related_task))]
-
-        self.assert_(
-            len(open_related_tasks) > 0,
-            'Bugtask %s on %s has no open related tasks.' % (
-                bugtask.id, bugtask.target.displayname))
-
-    def _hasUpstreamTask(self, bug):
-        """Does this bug have an upstream task associated with it?
-
-        Returns True if yes, otherwise False.
-        """
-        for bugtask in bug.bugtasks:
-            if IUpstreamBugTask.providedBy(bugtask):
-                return True
-        return False
-
-    def assertShouldBeShownOnNoUpstreamTaskSearch(self, bugtask):
-        """Should the bugtask be shown in the search no upstream task search?
-
-        Returns True if yes, otherwise False.
-        """
-        self.assert_(
-            not self._hasUpstreamTask(bugtask.bug),
-            'Bugtask %s on %s has upstream tasks.' % (
-                bugtask.id, bugtask.target.displayname))
-
-
-class BugTaskSetFindExpirableBugTasksTest(unittest.TestCase):
-    """Test `BugTaskSet.findExpirableBugTasks()` behaviour."""
-    layer = DatabaseFunctionalLayer
-
-    def setUp(self):
-        """Setup the zope interaction and create expirable bugtasks."""
-        login('test@xxxxxxxxxxxxx')
-        self.user = getUtility(ILaunchBag).user
-        self.distribution = getUtility(IDistributionSet).getByName('ubuntu')
-        self.distroseries = self.distribution.getSeries('hoary')
-        self.product = getUtility(IProductSet).getByName('jokosher')
-        self.productseries = self.product.getSeries('trunk')
-        self.bugtaskset = getUtility(IBugTaskSet)
-        bugtasks = []
-        bugtasks.append(
-            create_old_bug("90 days old", 90, self.distribution))
-        bugtasks.append(
-            self.bugtaskset.createTask(
-                bug=bugtasks[-1].bug, owner=self.user,
-                distroseries=self.distroseries))
-        bugtasks.append(
-            create_old_bug("90 days old", 90, self.product))
-        bugtasks.append(
-            self.bugtaskset.createTask(
-                bug=bugtasks[-1].bug, owner=self.user,
-                productseries=self.productseries))
-        sync_bugtasks(bugtasks)
-
-    def tearDown(self):
-        logout()
-
-    def testSupportedTargetParam(self):
-        """The target param supports a limited set of BugTargets.
-
-        Four BugTarget types may passed as the target argument:
-        Distribution, DistroSeries, Product, ProductSeries.
-        """
-        supported_targets = [self.distribution, self.distroseries,
-                             self.product, self.productseries]
-        for target in supported_targets:
-            expirable_bugtasks = self.bugtaskset.findExpirableBugTasks(
-                0, self.user, target=target)
-            self.assertNotEqual(expirable_bugtasks.count(), 0,
-                 "%s has %d expirable bugtasks." %
-                 (self.distroseries, expirable_bugtasks.count()))
-
-    def testUnsupportedBugTargetParam(self):
-        """Test that unsupported targets raise errors.
-
-        Three BugTarget types are not supported because the UI does not
-        provide bug-index to link to the 'bugs that can expire' page.
-        ProjectGroup, SourcePackage, and DistributionSourcePackage will
-        raise an NotImplementedError.
-
-        Passing an unknown bugtarget type will raise an AssertionError.
-        """
-        project = getUtility(IProjectGroupSet).getByName('mozilla')
-        distributionsourcepackage = self.distribution.getSourcePackage(
-            'mozilla-firefox')
-        sourcepackage = self.distroseries.getSourcePackage(
-            'mozilla-firefox')
-        unsupported_targets = [project, distributionsourcepackage,
-                               sourcepackage]
-        for target in unsupported_targets:
-            self.assertRaises(
-                NotImplementedError, self.bugtaskset.findExpirableBugTasks,
-                0, self.user, target=target)
-
-        # Objects that are not a known BugTarget type raise an AssertionError.
-        self.assertRaises(
-            AssertionError, self.bugtaskset.findExpirableBugTasks,
-            0, self.user, target=[])
-
-
-class BugTaskSetTest(unittest.TestCase):
-    """Test `BugTaskSet` methods."""
-    layer = DatabaseFunctionalLayer
-
-    def setUp(self):
-        login(ANONYMOUS)
-
-    def test_getBugTasks(self):
-        """ IBugTaskSet.getBugTasks() returns a dictionary mapping the given
-        bugs to their bugtasks. It does that in a single query, to avoid
-        hitting the DB again when getting the bugs' tasks.
-        """
-        login('no-priv@xxxxxxxxxxxxx')
-        factory = LaunchpadObjectFactory()
-        bug1 = factory.makeBug()
-        factory.makeBugTask(bug1)
-        bug2 = factory.makeBug()
-        factory.makeBugTask(bug2)
-        factory.makeBugTask(bug2)
-
-        bugs_and_tasks = getUtility(IBugTaskSet).getBugTasks(
-            [bug1.id, bug2.id])
-        # The bugtasks returned by getBugTasks() are exactly the same as the
-        # ones returned by bug.bugtasks, obviously.
-        self.failUnlessEqual(
-            set(bugs_and_tasks[bug1]).difference(bug1.bugtasks),
-            set([]))
-        self.failUnlessEqual(
-            set(bugs_and_tasks[bug2]).difference(bug2.bugtasks),
-            set([]))
-
-    def test_getBugTasks_with_empty_list(self):
-        # When given an empty list of bug IDs, getBugTasks() will return an
-        # empty dictionary.
-        bugs_and_tasks = getUtility(IBugTaskSet).getBugTasks([])
-        self.failUnlessEqual(bugs_and_tasks, {})

=== modified file 'lib/lp/bugs/tests/test_bugtask_search.py'
--- lib/lp/bugs/tests/test_bugtask_search.py	2010-10-20 09:37:11 +0000
+++ lib/lp/bugs/tests/test_bugtask_search.py	2010-10-27 02:13:03 +0000
@@ -9,14 +9,19 @@
 
 from zope.component import getUtility
 
-from canonical.testing.layers import DatabaseFunctionalLayer
+from canonical.launchpad.searchbuilder import any
+from canonical.testing.layers import (
+    LaunchpadFunctionalLayer,
+    )
 
+from lp.bugs.interfaces.bugattachment import BugAttachmentType
 from lp.bugs.interfaces.bugtask import (
     BugTaskImportance,
     BugTaskSearchParams,
     BugTaskStatus,
     IBugTaskSet,
     )
+from lp.registry.interfaces.person import IPersonSet
 from lp.testing import (
     person_logged_in,
     TestCaseWithFactory,
@@ -26,7 +31,7 @@
 class SearchTestBase:
     """A mixin class with tests useful for all targets and search variants."""
 
-    layer = DatabaseFunctionalLayer
+    layer = LaunchpadFunctionalLayer
 
     def setUp(self):
         super(SearchTestBase, self).setUp()
@@ -40,7 +45,7 @@
         expected = self.resultValuesForBugtasks(self.bugtasks)
         self.assertEqual(expected, search_result)
 
-    def test_private_bug_not_in_search_result_for_anonymous(self):
+    def test_private_bug_in_search_result(self):
         # Private bugs are not included in search results for anonymous users.
         with person_logged_in(self.owner):
             self.bugtasks[-1].bug.setPrivate(True, self.owner)
@@ -49,28 +54,148 @@
         expected = self.resultValuesForBugtasks(self.bugtasks)[:-1]
         self.assertEqual(expected, search_result)
 
-    def test_private_bug_not_in_search_result_for_regular_user(self):
         # Private bugs are not included in search results for ordinary users.
-        with person_logged_in(self.owner):
-            self.bugtasks[-1].bug.setPrivate(True, self.owner)
         user = self.factory.makePerson()
         params = self.getBugTaskSearchParams(user=user)
         search_result = self.runSearch(params)
         expected = self.resultValuesForBugtasks(self.bugtasks)[:-1]
         self.assertEqual(expected, search_result)
 
-    def test_private_bug_in_search_result_for_subscribed_user(self):
-        # Private bugs are included in search results for ordinary users
-        # which are subscribed to the bug.
+        # If the user is subscribed to the bug, it is included in the
+        # search result.
         user = self.factory.makePerson()
         with person_logged_in(self.owner):
-            self.bugtasks[-1].bug.setPrivate(True, self.owner)
             self.bugtasks[-1].bug.subscribe(user, self.owner)
         params = self.getBugTaskSearchParams(user=user)
         search_result = self.runSearch(params)
         expected = self.resultValuesForBugtasks(self.bugtasks)
         self.assertEqual(expected, search_result)
 
+        # Private bugs are included in search results for admins.
+        admin = getUtility(IPersonSet).getByEmail('foo.bar@xxxxxxxxxxxxx')
+        params = self.getBugTaskSearchParams(user=admin)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks(self.bugtasks)
+        self.assertEqual(expected, search_result)
+
+    def test_search_by_bug_reporter(self):
+        # Search results can be limited to bugs filed by a given person.
+        bugtask = self.bugtasks[0]
+        reporter = bugtask.bug.owner
+        params = self.getBugTaskSearchParams(
+            user=None, bug_reporter=reporter)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks([bugtask])
+        self.assertEqual(expected, search_result)
+
+    def test_search_by_bug_commenter(self):
+        # Search results can be limited to bugs having a comment from a
+        # given person.
+        # Note that this does not include the bug description (which is
+        # stored as the first comment of a bug.) Hence, if we let the
+        # reporter of our first test bug comment on the second test bug,
+        # a search for bugs having comments from this person retruns only
+        # the second bug.
+        commenter = self.bugtasks[0].bug.owner
+        expected = self.bugtasks[1]
+        with person_logged_in(commenter):
+            expected.bug.newMessage(owner=commenter, content='a comment')
+        params = self.getBugTaskSearchParams(
+            user=None, bug_commenter=commenter)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks([expected])
+        self.assertEqual(expected, search_result)
+
+    def test_search_by_person_affected_by_bug(self):
+        # Search results can be limited to bugs which affect a given person.
+        affected_user = self.factory.makePerson()
+        expected = self.bugtasks[0]
+        with person_logged_in(affected_user):
+            expected.bug.markUserAffected(affected_user)
+        params = self.getBugTaskSearchParams(
+            user=None, affected_user=affected_user)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks([expected])
+        self.assertEqual(expected, search_result)
+
+    def test_search_by_bugtask_assignee(self):
+        # Search results can be limited to bugtask assigned to a given
+        # person.
+        assignee = self.factory.makePerson()
+        expected = self.bugtasks[0]
+        with person_logged_in(assignee):
+            expected.transitionToAssignee(assignee)
+        params = self.getBugTaskSearchParams(user=None, assignee=assignee)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks([expected])
+        self.assertEqual(expected, search_result)
+
+    def test_search_by_bug_subscriber(self):
+        # Search results can be limited to bugs to which a given person
+        # is subscribed.
+        subscriber = self.factory.makePerson()
+        expected = self.bugtasks[0]
+        with person_logged_in(subscriber):
+            expected.bug.subscribe(subscriber, subscribed_by=subscriber)
+        params = self.getBugTaskSearchParams(user=None, subscriber=subscriber)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks([expected])
+        self.assertEqual(expected, search_result)
+
+    def test_search_by_bug_attachment(self):
+        # Search results can be limited to bugs having attachments of
+        # a given type.
+        with person_logged_in(self.owner):
+            self.bugtasks[0].bug.addAttachment(
+                owner=self.owner, data='filedata', comment='a comment',
+                filename='file1.txt', is_patch=False)
+            self.bugtasks[1].bug.addAttachment(
+                owner=self.owner, data='filedata', comment='a comment',
+                filename='file1.txt', is_patch=True)
+        # We can search for bugs with non-patch attachments...
+        params = self.getBugTaskSearchParams(
+            user=None, attachmenttype=BugAttachmentType.UNSPECIFIED)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks(self.bugtasks[:1])
+        self.assertEqual(expected, search_result)
+        # ... for bugs with patches...
+        params = self.getBugTaskSearchParams(
+            user=None, attachmenttype=BugAttachmentType.PATCH)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks(self.bugtasks[1:2])
+        self.assertEqual(expected, search_result)
+        # and for bugs with patches or attachments
+        params = self.getBugTaskSearchParams(
+            user=None, attachmenttype=any(
+                BugAttachmentType.PATCH,
+                BugAttachmentType.UNSPECIFIED))
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks(self.bugtasks[:2])
+        self.assertEqual(expected, search_result)
+
+
+class ProductAndDistributionTests:
+    """Tests which are useful for distributions and products."""
+
+    def makeSeries(self):
+        """Return a series for the main bug target of this class."""
+        raise NotImplementedError
+
+    def test_search_by_bug_nomination(self):
+        # Search results can be limited to bugs nominated to a given
+        # series.
+        series1 = self.makeSeries()
+        series2 = self.makeSeries()
+        nominator = self.factory.makePerson()
+        with person_logged_in(self.owner):
+            self.bugtasks[0].bug.addNomination(nominator, series1)
+            self.bugtasks[1].bug.addNomination(nominator, series2)
+        params = self.getBugTaskSearchParams(
+            user=None, nominated_for=series1)
+        search_result = self.runSearch(params)
+        expected = self.resultValuesForBugtasks(self.bugtasks[:1])
+        self.assertEqual(expected, search_result)
+
 
 class BugTargetTestBase:
     """A base class for the bug target mixin classes."""
@@ -123,7 +248,8 @@
             self.searchtarget.setBugSupervisor(supervisor, self.owner)
 
 
-class ProductTarget(BugTargetTestBase, BugTargetWithBugSuperVisor):
+class ProductTarget(BugTargetTestBase, ProductAndDistributionTests,
+                    BugTargetWithBugSuperVisor):
     """Use a product as the bug target."""
 
     def setUp(self):
@@ -141,6 +267,10 @@
         params.setProduct(self.searchtarget)
         return params
 
+    def makeSeries(self):
+        """See `ProductAndDistributionTests`."""
+        return self.factory.makeProductSeries(product=self.searchtarget)
+
 
 class ProductSeriesTarget(BugTargetTestBase):
     """Use a product series as the bug target."""
@@ -239,7 +369,8 @@
                 bugtask.transitionToMilestone(self.searchtarget, self.owner)
 
 
-class DistributionTarget(BugTargetTestBase, BugTargetWithBugSuperVisor):
+class DistributionTarget(BugTargetTestBase, ProductAndDistributionTests,
+                         BugTargetWithBugSuperVisor):
     """Use a distribution as the bug target."""
 
     def setUp(self):
@@ -257,6 +388,10 @@
         params.setDistribution(self.searchtarget)
         return params
 
+    def makeSeries(self):
+        """See `ProductAndDistributionTests`."""
+        return self.factory.makeDistroSeries(distribution=self.searchtarget)
+
 
 class DistroseriesTarget(BugTargetTestBase):
     """Use a distro series as the bug target."""

=== modified file 'lib/lp/buildmaster/doc/builder.txt'
--- lib/lp/buildmaster/doc/builder.txt	2010-09-23 12:35:21 +0000
+++ lib/lp/buildmaster/doc/builder.txt	2010-10-27 02:13:03 +0000
@@ -19,9 +19,6 @@
 As expected, it implements IBuilder.
 
     >>> from canonical.launchpad.webapp.testing import verifyObject
-    >>> from lp.buildmaster.interfaces.builder import IBuilder
-    >>> verifyObject(IBuilder, builder)
-    True
 
     >>> print builder.name
     bob
@@ -86,7 +83,7 @@
 The 'new' method will create a new builder in the database.
 
     >>> bnew = builderset.new(1, 'http://dummy.com:8221/', 'dummy',
-    ...	                   'Dummy Title', 'eh ?', 1)
+    ...                    'Dummy Title', 'eh ?', 1)
     >>> bnew.name
     u'dummy'
 
@@ -170,7 +167,7 @@
     >>> recipe_bq.processor = i386_family.processors[0]
     >>> recipe_bq.virtualized = True
     >>> transaction.commit()
-    
+
     >>> queue_sizes = builderset.getBuildQueueSizes()
     >>> print queue_sizes['virt']['386']
     (1L, datetime.timedelta(0, 64))
@@ -188,116 +185,3 @@
 
     >>> print queue_sizes['virt']['386']
     (2L, datetime.timedelta(0, 128))
-
-
-Resuming buildd slaves
-======================
-
-Virtual slaves are resumed using a command specified in the
-configuration profile. Production configuration uses a SSH trigger
-account accessed via a private key available in the builddmaster
-machine (which used ftpmaster configuration profile) as in:
-
-{{{
-ssh ~/.ssh/ppa-reset-key ppa@%(vm_host)s
-}}}
-
-The test configuration uses a fake command that can be performed in
-development machine and allow us to tests the important features used
-in production, as 'vm_host' variable replacement.
-
-    >>> from canonical.config import config
-    >>> config.builddmaster.vm_resume_command
-    'echo %(vm_host)s'
-
-Before performing the command, it checks if the builder is indeed
-virtual and raises CannotResumeHost if it isn't.
-
-    >>> bob = getUtility(IBuilderSet)['bob']
-    >>> bob.resumeSlaveHost()
-    Traceback (most recent call last):
-    ...
-    CannotResumeHost: Builder is not virtualized.
-
-For testing purposes resumeSlaveHost returns the stdout and stderr
-buffer resulted from the command.
-
-    >>> frog = getUtility(IBuilderSet)['frog']
-    >>> out, err = frog.resumeSlaveHost()
-    >>> print out.strip()
-    localhost-host.ppa
-
-If the specified command fails, resumeSlaveHost also raises
-CannotResumeHost exception with the results stdout and stderr.
-
-    # The command must have a vm_host dict key and when executed,
-    # have a returncode that is not 0.
-    >>> vm_resume_command = """
-    ...     [builddmaster]
-    ...     vm_resume_command: test "%(vm_host)s = 'false'"
-    ...     """
-    >>> config.push('vm_resume_command', vm_resume_command)
-    >>> frog.resumeSlaveHost()
-    Traceback (most recent call last):
-    ...
-    CannotResumeHost: Resuming failed:
-    OUT:
-    <BLANKLINE>
-    ERR:
-    <BLANKLINE>
-
-Restore default value for resume command.
-
-    >>> config_data = config.pop('vm_resume_command')
-
-
-Rescuing lost slaves
-====================
-
-Builder.rescueIfLost() checks the build ID reported in the slave status
-against the database. If it isn't building what we think it should be,
-the current build will be aborted and the slave cleaned in preparation
-for a new task. The decision about the slave's correctness is left up
-to IBuildFarmJobBehavior.verifySlaveBuildCookie -- for these examples we
-will use a special behavior that just checks if the cookie reads 'good'.
-
-    >>> import logging
-    >>> from lp.buildmaster.interfaces.builder import CorruptBuildCookie
-    >>> from lp.buildmaster.tests.mock_slaves import (
-    ...    BuildingSlave, MockBuilder, OkSlave, WaitingSlave)
-
-    >>> class TestBuildBehavior:
-    ...     def verifySlaveBuildCookie(self, cookie):
-    ...         if cookie != 'good':
-    ...             raise CorruptBuildCookie('Bad value')
-
-    >>> def rescue_slave_if_lost(slave):
-    ...     builder = MockBuilder('mock', slave, TestBuildBehavior())
-    ...     builder.rescueIfLost(logging.getLogger())
-
-An idle slave is not rescued.
-
-    >>> rescue_slave_if_lost(OkSlave())
-
-Slaves building or having built the correct build are not rescued
-either.
-
-    >>> rescue_slave_if_lost(BuildingSlave(build_id='good'))
-    >>> rescue_slave_if_lost(WaitingSlave(build_id='good'))
-
-But if a slave is building the wrong ID, it is declared lost and
-an abort is attempted. MockSlave prints out a message when it is aborted
-or cleaned.
-
-    >>> rescue_slave_if_lost(BuildingSlave(build_id='bad'))
-    Aborting slave
-    INFO:root:Builder 'mock' rescued from 'bad': 'Bad value'
-
-Slaves having completed an incorrect build are also declared lost,
-but there's no need to abort a completed build. Such builders are
-instead simply cleaned, ready for the next build.
-
-    >>> rescue_slave_if_lost(WaitingSlave(build_id='bad'))
-    Cleaning slave
-    INFO:root:Builder 'mock' rescued from 'bad': 'Bad value'
-

=== modified file 'lib/lp/buildmaster/interfaces/builder.py'
--- lib/lp/buildmaster/interfaces/builder.py	2010-09-23 18:17:21 +0000
+++ lib/lp/buildmaster/interfaces/builder.py	2010-10-27 02:13:03 +0000
@@ -154,11 +154,6 @@
 
     currentjob = Attribute("BuildQueue instance for job being processed.")
 
-    is_available = Bool(
-        title=_("Whether or not a builder is available for building "
-                "new jobs. "),
-        required=False)
-
     failure_count = Int(
         title=_('Failure Count'), required=False, default=0,
         description=_("Number of consecutive failures for this builder."))
@@ -173,32 +168,74 @@
     def resetFailureCount():
         """Set the failure_count back to zero."""
 
-    def checkSlaveAlive():
-        """Check that the buildd slave is alive.
-
-        This pings the slave over the network via the echo method and looks
-        for the sent message as the reply.
-
-        :raises BuildDaemonError: When the slave is down.
+    def failBuilder(reason):
+        """Mark builder as failed for a given reason."""
+
+    def setSlaveForTesting(proxy):
+        """Sets the RPC proxy through which to operate the build slave."""
+
+    def verifySlaveBuildCookie(slave_build_id):
+        """Verify that a slave's build cookie is consistent.
+
+        This should delegate to the current `IBuildFarmJobBehavior`.
+        """
+
+    def transferSlaveFileToLibrarian(file_sha1, filename, private):
+        """Transfer a file from the slave to the librarian.
+
+        :param file_sha1: The file's sha1, which is how the file is addressed
+            in the slave XMLRPC protocol. Specifically, the file_sha1 'buildlog'
+            will cause the build log to be retrieved and gzipped.
+        :param filename: The name of the file to be given to the librarian file
+            alias.
+        :param private: True if the build is for a private archive.
+        :return: A librarian file alias.
+        """
+
+    def getBuildQueue():
+        """Return a `BuildQueue` if there's an active job on this builder.
+
+        :return: A BuildQueue, or None.
+        """
+
+    def getCurrentBuildFarmJob():
+        """Return a `BuildFarmJob` for this builder."""
+
+    # All methods below here return Deferred.
+
+    def isAvailable():
+        """Whether or not a builder is available for building new jobs.
+
+        :return: A Deferred that fires with True or False, depending on
+            whether the builder is available or not.
         """
 
     def rescueIfLost(logger=None):
         """Reset the slave if its job information doesn't match the DB.
 
-        If the builder is BUILDING or WAITING but has a build ID string
-        that doesn't match what is stored in the DB, we have to dismiss
-        its current actions and clean the slave for another job, assuming
-        the XMLRPC is working properly at this point.
+        This checks the build ID reported in the slave status against the
+        database. If it isn't building what we think it should be, the current
+        build will be aborted and the slave cleaned in preparation for a new
+        task. The decision about the slave's correctness is left up to
+        `IBuildFarmJobBehavior.verifySlaveBuildCookie`.
+
+        :return: A Deferred that fires when the dialog with the slave is
+            finished.  It does not have a return value.
         """
 
     def updateStatus(logger=None):
-        """Update the builder's status by probing it."""
+        """Update the builder's status by probing it.
+
+        :return: A Deferred that fires when the dialog with the slave is
+            finished.  It does not have a return value.
+        """
 
     def cleanSlave():
-        """Clean any temporary files from the slave."""
-
-    def failBuilder(reason):
-        """Mark builder as failed for a given reason."""
+        """Clean any temporary files from the slave.
+
+        :return: A Deferred that fires when the dialog with the slave is
+            finished.  It does not have a return value.
+        """
 
     def requestAbort():
         """Ask that a build be aborted.
@@ -206,6 +243,9 @@
         This takes place asynchronously: Actually killing everything running
         can take some time so the slave status should be queried again to
         detect when the abort has taken effect. (Look for status ABORTED).
+
+        :return: A Deferred that fires when the dialog with the slave is
+            finished.  It does not have a return value.
         """
 
     def resumeSlaveHost():
@@ -217,37 +257,35 @@
         :raises: CannotResumeHost: if builder is not virtual or if the
             configuration command has failed.
 
-        :return: command stdout and stderr buffers as a tuple.
+        :return: A Deferred that fires when the resume operation finishes,
+            whose value is a (stdout, stderr) tuple for success, or a Failure
+            whose value is a CannotResumeHost exception.
         """
 
-    def setSlaveForTesting(proxy):
-        """Sets the RPC proxy through which to operate the build slave."""
-
     def slaveStatus():
         """Get the slave status for this builder.
 
-        :return: a dict containing at least builder_status, but potentially
-            other values included by the current build behavior.
+        :return: A Deferred which fires when the slave dialog is complete.
+            Its value is a dict containing at least builder_status, but
+            potentially other values included by the current build
+            behavior.
         """
 
     def slaveStatusSentence():
         """Get the slave status sentence for this builder.
 
-        :return: A tuple with the first element containing the slave status,
-            build_id-queue-id and then optionally more elements depending on
-            the status.
-        """
-
-    def verifySlaveBuildCookie(slave_build_id):
-        """Verify that a slave's build cookie is consistent.
-
-        This should delegate to the current `IBuildFarmJobBehavior`.
+        :return: A Deferred which fires when the slave dialog is complete.
+            Its value is a tuple with the first element containing the
+            slave status, build_id-queue-id and then optionally more
+            elements depending on the status.
         """
 
     def updateBuild(queueItem):
         """Verify the current build job status.
 
         Perform the required actions for each state.
+
+        :return: A Deferred that fires when the slave dialog is finished.
         """
 
     def startBuild(build_queue_item, logger):
@@ -255,21 +293,10 @@
 
         :param build_queue_item: A BuildQueueItem to build.
         :param logger: A logger to be used to log diagnostic information.
-        :raises BuildSlaveFailure: When the build slave fails.
-        :raises CannotBuild: When a build cannot be started for some reason
-            other than the build slave failing.
-        """
-
-    def transferSlaveFileToLibrarian(file_sha1, filename, private):
-        """Transfer a file from the slave to the librarian.
-
-        :param file_sha1: The file's sha1, which is how the file is addressed
-            in the slave XMLRPC protocol. Specially, the file_sha1 'buildlog'
-            will cause the build log to be retrieved and gzipped.
-        :param filename: The name of the file to be given to the librarian file
-            alias.
-        :param private: True if the build is for a private archive.
-        :return: A librarian file alias.
+
+        :return: A Deferred that fires after the dispatch has completed whose
+            value is None, or a Failure that contains an exception
+            explaining what went wrong.
         """
 
     def handleTimeout(logger, error_message):
@@ -284,6 +311,8 @@
 
         :param logger: The logger object to be used for logging.
         :param error_message: The error message to be used for logging.
+        :return: A Deferred that fires after the virtual slave was resumed
+            or immediately if it's a non-virtual slave.
         """
 
     def findAndStartJob(buildd_slave=None):
@@ -291,17 +320,9 @@
 
         :param buildd_slave: An optional buildd slave that this builder should
             talk to.
-        :return: the `IBuildQueue` instance found or None if no job was found.
-        """
-
-    def getBuildQueue():
-        """Return a `BuildQueue` if there's an active job on this builder.
-
-        :return: A BuildQueue, or None.
-        """
-
-    def getCurrentBuildFarmJob():
-        """Return a `BuildFarmJob` for this builder."""
+        :return: A Deferred whose value is the `IBuildQueue` instance
+            found or None if no job was found.
+        """
 
 
 class IBuilderSet(Interface):

=== modified file 'lib/lp/buildmaster/manager.py'
--- lib/lp/buildmaster/manager.py	2010-09-24 15:40:49 +0000
+++ lib/lp/buildmaster/manager.py	2010-10-27 02:13:03 +0000
@@ -10,13 +10,10 @@
     'BuilddManager',
     'BUILDD_MANAGER_LOG_NAME',
     'FailDispatchResult',
-    'RecordingSlave',
     'ResetDispatchResult',
-    'buildd_success_result_map',
     ]
 
 import logging
-import os
 
 import transaction
 from twisted.application import service
@@ -24,129 +21,27 @@
     defer,
     reactor,
     )
-from twisted.protocols.policies import TimeoutMixin
+from twisted.internet.task import LoopingCall
 from twisted.python import log
-from twisted.python.failure import Failure
-from twisted.web import xmlrpc
 from zope.component import getUtility
 
-from canonical.config import config
-from canonical.launchpad.webapp import urlappend
-from lp.services.database import write_transaction
 from lp.buildmaster.enums import BuildStatus
-from lp.services.twistedsupport.processmonitor import ProcessWithTimeout
+from lp.buildmaster.interfaces.buildfarmjobbehavior import (
+    BuildBehaviorMismatch,
+    )
+from lp.buildmaster.model.builder import Builder
+from lp.buildmaster.interfaces.builder import (
+    BuildDaemonError,
+    BuildSlaveFailure,
+    CannotBuild,
+    CannotFetchFile,
+    CannotResumeHost,
+    )
 
 
 BUILDD_MANAGER_LOG_NAME = "slave-scanner"
 
 
-buildd_success_result_map = {
-    'ensurepresent': True,
-    'build': 'BuilderStatus.BUILDING',
-    }
-
-
-class QueryWithTimeoutProtocol(xmlrpc.QueryProtocol, TimeoutMixin):
-    """XMLRPC query protocol with a configurable timeout.
-
-    XMLRPC queries using this protocol will be unconditionally closed
-    when the timeout is elapsed. The timeout is fetched from the context
-    Launchpad configuration file (`config.builddmaster.socket_timeout`).
-    """
-    def connectionMade(self):
-        xmlrpc.QueryProtocol.connectionMade(self)
-        self.setTimeout(config.builddmaster.socket_timeout)
-
-
-class QueryFactoryWithTimeout(xmlrpc._QueryFactory):
-    """XMLRPC client factory with timeout support."""
-    # Make this factory quiet.
-    noisy = False
-    # Use the protocol with timeout support.
-    protocol = QueryWithTimeoutProtocol
-
-
-class RecordingSlave:
-    """An RPC proxy for buildd slaves that records instructions to the latter.
-
-    The idea here is to merely record the instructions that the slave-scanner
-    issues to the buildd slaves and "replay" them a bit later in asynchronous
-    and parallel fashion.
-
-    By dealing with a number of buildd slaves in parallel we remove *the*
-    major slave-scanner throughput issue while avoiding large-scale changes to
-    its code base.
-    """
-
-    def __init__(self, name, url, vm_host):
-        self.name = name
-        self.url = url
-        self.vm_host = vm_host
-
-        self.resume_requested = False
-        self.calls = []
-
-    def __repr__(self):
-        return '<%s:%s>' % (self.name, self.url)
-
-    def cacheFile(self, logger, libraryfilealias):
-        """Cache the file on the server."""
-        self.ensurepresent(
-            libraryfilealias.content.sha1, libraryfilealias.http_url, '', '')
-
-    def sendFileToSlave(self, *args):
-        """Helper to send a file to this builder."""
-        return self.ensurepresent(*args)
-
-    def ensurepresent(self, *args):
-        """Download files needed for the build."""
-        self.calls.append(('ensurepresent', args))
-        result = buildd_success_result_map.get('ensurepresent')
-        return [result, 'Download']
-
-    def build(self, *args):
-        """Perform the build."""
-        # XXX: This method does not appear to be used.
-        self.calls.append(('build', args))
-        result = buildd_success_result_map.get('build')
-        return [result, args[0]]
-
-    def resume(self):
-        """Record the request to resume the builder..
-
-        Always succeed.
-
-        :return: a (stdout, stderr, subprocess exitcode) triple
-        """
-        self.resume_requested = True
-        return ['', '', 0]
-
-    def resumeSlave(self, clock=None):
-        """Resume the builder in a asynchronous fashion.
-
-        Used the configuration command-line in the same way
-        `BuilddSlave.resume` does.
-
-        Also use the builddmaster configuration 'socket_timeout' as
-        the process timeout.
-
-        :param clock: An optional twisted.internet.task.Clock to override
-                      the default clock.  For use in tests.
-
-        :return: a Deferred
-        """
-        resume_command = config.builddmaster.vm_resume_command % {
-            'vm_host': self.vm_host}
-        # Twisted API require string and the configuration provides unicode.
-        resume_argv = [str(term) for term in resume_command.split()]
-
-        d = defer.Deferred()
-        p = ProcessWithTimeout(
-            d, config.builddmaster.socket_timeout, clock=clock)
-        p.spawnProcess(resume_argv[0], tuple(resume_argv))
-        return d
-
-
 def get_builder(name):
     """Helper to return the builder given the slave for this request."""
     # Avoiding circular imports.
@@ -159,9 +54,12 @@
     # builder.currentjob hides a complicated query, don't run it twice.
     # See bug 623281.
     current_job = builder.currentjob
-    build_job = current_job.specific_job.build
+    if current_job is None:
+        job_failure_count = 0
+    else:
+        job_failure_count = current_job.specific_job.build.failure_count
 
-    if builder.failure_count == build_job.failure_count:
+    if builder.failure_count == job_failure_count and current_job is not None:
         # If the failure count for the builder is the same as the
         # failure count for the job being built, then we cannot
         # tell whether the job or the builder is at fault. The  best
@@ -170,17 +68,28 @@
         current_job.reset()
         return
 
-    if builder.failure_count > build_job.failure_count:
+    if builder.failure_count > job_failure_count:
         # The builder has failed more than the jobs it's been
-        # running, so let's disable it and re-schedule the build.
-        builder.failBuilder(fail_notes)
-        current_job.reset()
+        # running.
+
+        # Re-schedule the build if there is one.
+        if current_job is not None:
+            current_job.reset()
+
+        # We are a little more tolerant with failing builders than
+        # failing jobs because sometimes they get unresponsive due to
+        # human error, flaky networks etc.  We expect the builder to get
+        # better, whereas jobs are very unlikely to get better.
+        if builder.failure_count >= Builder.FAILURE_THRESHOLD:
+            # It's also gone over the threshold so let's disable it.
+            builder.failBuilder(fail_notes)
     else:
         # The job is the culprit!  Override its status to 'failed'
         # to make sure it won't get automatically dispatched again,
         # and remove the buildqueue request.  The failure should
         # have already caused any relevant slave data to be stored
         # on the build record so don't worry about that here.
+        build_job = current_job.specific_job.build
         build_job.status = BuildStatus.FAILEDTOBUILD
         builder.currentjob.destroySelf()
 
@@ -190,133 +99,108 @@
         # next buildd scan.
 
 
-class BaseDispatchResult:
-    """Base class for *DispatchResult variations.
-
-    It will be extended to represent dispatching results and allow
-    homogeneous processing.
-    """
-
-    def __init__(self, slave, info=None):
-        self.slave = slave
-        self.info = info
-
-    def _cleanJob(self, job):
-        """Clean up in case of builder reset or dispatch failure."""
-        if job is not None:
-            job.reset()
-
-    def assessFailureCounts(self):
-        """View builder/job failure_count and work out which needs to die.
-
-        :return: True if we disabled something, False if we did not.
-        """
-        builder = get_builder(self.slave.name)
-        assessFailureCounts(builder, self.info)
-
-    def ___call__(self):
-        raise NotImplementedError(
-            "Call sites must define an evaluation method.")
-
-
-class FailDispatchResult(BaseDispatchResult):
-    """Represents a communication failure while dispatching a build job..
-
-    When evaluated this object mark the corresponding `IBuilder` as
-    'NOK' with the given text as 'failnotes'. It also cleans up the running
-    job (`IBuildQueue`).
-    """
-
-    def __repr__(self):
-        return  '%r failure (%s)' % (self.slave, self.info)
-
-    @write_transaction
-    def __call__(self):
-        self.assessFailureCounts()
-
-
-class ResetDispatchResult(BaseDispatchResult):
-    """Represents a failure to reset a builder.
-
-    When evaluated this object simply cleans up the running job
-    (`IBuildQueue`) and marks the builder down.
-    """
-
-    def __repr__(self):
-        return  '%r reset failure' % self.slave
-
-    @write_transaction
-    def __call__(self):
-        builder = get_builder(self.slave.name)
-        # Builders that fail to reset should be disabled as per bug
-        # 563353.
-        # XXX Julian bug=586362
-        # This is disabled until this code is not also used for dispatch
-        # failures where we *don't* want to disable the builder.
-        # builder.failBuilder(self.info)
-        self._cleanJob(builder.currentjob)
-
-
 class SlaveScanner:
     """A manager for a single builder."""
 
+    # The interval between each poll cycle, in seconds.  We'd ideally
+    # like this to be lower but 5 seems a reasonable compromise between
+    # responsivity and load on the database server, since in each cycle
+    # we can run quite a few queries.
     SCAN_INTERVAL = 5
 
-    # These are for the benefit of tests; see `TestingSlaveScanner`.
-    # It pokes fake versions in here so that it can verify methods were
-    # called.  The tests should really be using FakeMethod() though.
-    reset_result = ResetDispatchResult
-    fail_result = FailDispatchResult
-
     def __init__(self, builder_name, logger):
         self.builder_name = builder_name
         self.logger = logger
-        self._deferred_list = []
-
-    def scheduleNextScanCycle(self):
-        """Schedule another scan of the builder some time in the future."""
-        self._deferred_list = []
-        # XXX: Change this to use LoopingCall.
-        reactor.callLater(self.SCAN_INTERVAL, self.startCycle)
 
     def startCycle(self):
         """Scan the builder and dispatch to it or deal with failures."""
+        self.loop = LoopingCall(self.singleCycle)
+        self.stopping_deferred = self.loop.start(self.SCAN_INTERVAL)
+        return self.stopping_deferred
+
+    def stopCycle(self):
+        """Terminate the LoopingCall."""
+        self.loop.stop()
+
+    def singleCycle(self):
         self.logger.debug("Scanning builder: %s" % self.builder_name)
-
+        d = self.scan()
+
+        d.addErrback(self._scanFailed)
+        return d
+
+    def _scanFailed(self, failure):
+        """Deal with failures encountered during the scan cycle.
+
+        1. Print the error in the log
+        2. Increment and assess failure counts on the builder and job.
+        """
+        # Make sure that pending database updates are removed as it
+        # could leave the database in an inconsistent state (e.g. The
+        # job says it's running but the buildqueue has no builder set).
+        transaction.abort()
+
+        # If we don't recognise the exception include a stack trace with
+        # the error.
+        error_message = failure.getErrorMessage()
+        if failure.check(
+            BuildSlaveFailure, CannotBuild, BuildBehaviorMismatch,
+            CannotResumeHost, BuildDaemonError, CannotFetchFile):
+            self.logger.info("Scanning failed with: %s" % error_message)
+        else:
+            self.logger.info("Scanning failed with: %s\n%s" %
+                (failure.getErrorMessage(), failure.getTraceback()))
+
+        # Decide if we need to terminate the job or fail the
+        # builder.
         try:
-            slave = self.scan()
-            if slave is None:
-                self.scheduleNextScanCycle()
+            builder = get_builder(self.builder_name)
+            builder.gotFailure()
+            if builder.currentjob is not None:
+                build_farm_job = builder.getCurrentBuildFarmJob()
+                build_farm_job.gotFailure()
+                self.logger.info(
+                    "builder %s failure count: %s, "
+                    "job '%s' failure count: %s" % (
+                        self.builder_name,
+                        builder.failure_count,
+                        build_farm_job.title, 
+                        build_farm_job.failure_count))
             else:
-                # XXX: Ought to return Deferred.
-                self.resumeAndDispatch(slave)
+                self.logger.info(
+                    "Builder %s failed a probe, count: %s" % (
+                        self.builder_name, builder.failure_count))
+            assessFailureCounts(builder, failure.getErrorMessage())
+            transaction.commit()
         except:
-            error = Failure()
-            self.logger.info("Scanning failed with: %s\n%s" %
-                (error.getErrorMessage(), error.getTraceback()))
-
-            builder = get_builder(self.builder_name)
-
-            # Decide if we need to terminate the job or fail the
-            # builder.
-            self._incrementFailureCounts(builder)
-            self.logger.info(
-                "builder failure count: %s, job failure count: %s" % (
-                    builder.failure_count,
-                    builder.getCurrentBuildFarmJob().failure_count))
-            assessFailureCounts(builder, error.getErrorMessage())
-            transaction.commit()
-
-            self.scheduleNextScanCycle()
-
-    @write_transaction
+            # Catastrophic code failure! Not much we can do.
+            self.logger.error(
+                "Miserable failure when trying to examine failure counts:\n",
+                exc_info=True)
+            transaction.abort()
+
     def scan(self):
         """Probe the builder and update/dispatch/collect as appropriate.
 
-        The whole method is wrapped in a transaction, but we do partial
-        commits to avoid holding locks on tables.
-
-        :return: A `RecordingSlave` if we dispatched a job to it, or None.
+        There are several steps to scanning:
+
+        1. If the builder is marked as "ok" then probe it to see what state
+            it's in.  This is where lost jobs are rescued if we think the
+            builder is doing something that it later tells us it's not,
+            and also where the multi-phase abort procedure happens.
+            See IBuilder.rescueIfLost, which is called by
+            IBuilder.updateStatus().
+        2. If the builder is still happy, we ask it if it has an active build
+            and then either update the build in Launchpad or collect the
+            completed build. (builder.updateBuild)
+        3. If the builder is not happy or it was marked as unavailable
+            mid-build, we need to reset the job that we thought it had, so
+            that the job is dispatched elsewhere.
+        4. If the builder is idle and we have another build ready, dispatch
+            it.
+
+        :return: A Deferred that fires when the scan is complete, whose
+            value is a `BuilderSlave` if we dispatched a job to it, or None.
         """
         # We need to re-fetch the builder object on each cycle as the
         # Storm store is invalidated over transaction boundaries.
@@ -324,240 +208,72 @@
         self.builder = get_builder(self.builder_name)
 
         if self.builder.builderok:
-            self.builder.updateStatus(self.logger)
-            transaction.commit()
-
-        # See if we think there's an active build on the builder.
-        buildqueue = self.builder.getBuildQueue()
-
-        # XXX Julian 2010-07-29 bug=611258
-        # We're not using the RecordingSlave until dispatching, which
-        # means that this part blocks until we've received a response
-        # from the builder.  updateBuild() needs to be made
-        # asyncronous.
-
-        # Scan the slave and get the logtail, or collect the build if
-        # it's ready.  Yes, "updateBuild" is a bad name.
-        if buildqueue is not None:
-            self.builder.updateBuild(buildqueue)
-            transaction.commit()
-
-        # If the builder is in manual mode, don't dispatch anything.
-        if self.builder.manual:
-            self.logger.debug(
-                '%s is in manual mode, not dispatching.' % self.builder.name)
-            return None
-
-        # If the builder is marked unavailable, don't dispatch anything.
-        # Additionaly, because builders can be removed from the pool at
-        # any time, we need to see if we think there was a build running
-        # on it before it was marked unavailable. In this case we reset
-        # the build thusly forcing it to get re-dispatched to another
-        # builder.
-        if not self.builder.is_available:
-            job = self.builder.currentjob
-            if job is not None and not self.builder.builderok:
-                self.logger.info(
-                    "%s was made unavailable, resetting attached "
-                    "job" % self.builder.name)
-                job.reset()
-                transaction.commit()
-            return None
-
-        # See if there is a job we can dispatch to the builder slave.
-
-        # XXX: Rather than use the slave actually associated with the builder
-        # (which, incidentally, shouldn't be a property anyway), we make a new
-        # RecordingSlave so we can get access to its asynchronous
-        # "resumeSlave" method. Blech.
-        slave = RecordingSlave(
-            self.builder.name, self.builder.url, self.builder.vm_host)
-        # XXX: Passing buildd_slave=slave overwrites the 'slave' property of
-        # self.builder. Not sure why this is needed yet.
-        self.builder.findAndStartJob(buildd_slave=slave)
-        if self.builder.currentjob is not None:
-            # After a successful dispatch we can reset the
-            # failure_count.
-            self.builder.resetFailureCount()
-            transaction.commit()
-            return slave
-
-        return None
-
-    def resumeAndDispatch(self, slave):
-        """Chain the resume and dispatching Deferreds."""
-        # XXX: resumeAndDispatch makes Deferreds without returning them.
-        if slave.resume_requested:
-            # The slave needs to be reset before we can dispatch to
-            # it (e.g. a virtual slave)
-
-            # XXX: Two problems here. The first is that 'resumeSlave' only
-            # exists on RecordingSlave (BuilderSlave calls it 'resume').
-            d = slave.resumeSlave()
-            d.addBoth(self.checkResume, slave)
+            d = self.builder.updateStatus(self.logger)
         else:
-            # No resume required, build dispatching can commence.
             d = defer.succeed(None)
 
-        # Dispatch the build to the slave asynchronously.
-        d.addCallback(self.initiateDispatch, slave)
-        # Store this deferred so we can wait for it along with all
-        # the others that will be generated by RecordingSlave during
-        # the dispatch process, and chain a callback after they've
-        # all fired.
-        self._deferred_list.append(d)
-
-    def initiateDispatch(self, resume_result, slave):
-        """Start dispatching a build to a slave.
-
-        If the previous task in chain (slave resuming) has failed it will
-        receive a `ResetBuilderRequest` instance as 'resume_result' and
-        will immediately return that so the subsequent callback can collect
-        it.
-
-        If the slave resuming succeeded, it starts the XMLRPC dialogue.  The
-        dialogue may consist of many calls to the slave before the build
-        starts.  Each call is done via a Deferred event, where slave calls
-        are sent in callSlave(), and checked in checkDispatch() which will
-        keep firing events via callSlave() until all the events are done or
-        an error occurs.
-        """
-        if resume_result is not None:
-            self.slaveConversationEnded()
-            return resume_result
-
-        self.logger.info('Dispatching: %s' % slave)
-        self.callSlave(slave)
-
-    def _getProxyForSlave(self, slave):
-        """Return a twisted.web.xmlrpc.Proxy for the buildd slave.
-
-        Uses a protocol with timeout support, See QueryFactoryWithTimeout.
-        """
-        proxy = xmlrpc.Proxy(str(urlappend(slave.url, 'rpc')))
-        proxy.queryFactory = QueryFactoryWithTimeout
-        return proxy
-
-    def callSlave(self, slave):
-        """Dispatch the next XMLRPC for the given slave."""
-        if len(slave.calls) == 0:
-            # That's the end of the dialogue with the slave.
-            self.slaveConversationEnded()
-            return
-
-        # Get an XMLRPC proxy for the buildd slave.
-        proxy = self._getProxyForSlave(slave)
-        method, args = slave.calls.pop(0)
-        d = proxy.callRemote(method, *args)
-        d.addBoth(self.checkDispatch, method, slave)
-        self._deferred_list.append(d)
-        self.logger.debug('%s -> %s(%s)' % (slave, method, args))
-
-    def slaveConversationEnded(self):
-        """After all the Deferreds are set up, chain a callback on them."""
-        dl = defer.DeferredList(self._deferred_list, consumeErrors=True)
-        dl.addBoth(self.evaluateDispatchResult)
-        return dl
-
-    def evaluateDispatchResult(self, deferred_list_results):
-        """Process the DispatchResult for this dispatch chain.
-
-        After waiting for the Deferred chain to finish, we'll have a
-        DispatchResult to evaluate, which deals with the result of
-        dispatching.
-        """
-        # The `deferred_list_results` is what we get when waiting on a
-        # DeferredList.  It's a list of tuples of (status, result) where
-        # result is what the last callback in that chain returned.
-
-        # If the result is an instance of BaseDispatchResult we need to
-        # evaluate it, as there's further action required at the end of
-        # the dispatch chain.  None, resulting from successful chains,
-        # are discarded.
-
-        dispatch_results = [
-            result for status, result in deferred_list_results
-            if isinstance(result, BaseDispatchResult)]
-
-        for result in dispatch_results:
-            self.logger.info("%r" % result)
-            result()
-
-        # At this point, we're done dispatching, so we can schedule the
-        # next scan cycle.
-        self.scheduleNextScanCycle()
-
-        # For the test suite so that it can chain callback results.
-        return deferred_list_results
-
-    def checkResume(self, response, slave):
-        """Check the result of resuming a slave.
-
-        If there's a problem resuming, we return a ResetDispatchResult which
-        will get evaluated at the end of the scan, or None if the resume
-        was OK.
-
-        :param response: the tuple that's constructed in
-            ProcessWithTimeout.processEnded(), or a Failure that
-            contains the tuple.
-        :param slave: the slave object we're talking to
-        """
-        if isinstance(response, Failure):
-            out, err, code = response.value
-        else:
-            out, err, code = response
-            if code == os.EX_OK:
-                return None
-
-        error_text = '%s\n%s' % (out, err)
-        self.logger.error('%s resume failure: %s' % (slave, error_text))
-        return self.reset_result(slave, error_text)
-
-    def _incrementFailureCounts(self, builder):
-        builder.gotFailure()
-        builder.getCurrentBuildFarmJob().gotFailure()
-
-    def checkDispatch(self, response, method, slave):
-        """Verify the results of a slave xmlrpc call.
-
-        If it failed and it compromises the slave then return a corresponding
-        `FailDispatchResult`, if it was a communication failure, simply
-        reset the slave by returning a `ResetDispatchResult`.
-        """
-        from lp.buildmaster.interfaces.builder import IBuilderSet
-        builder = getUtility(IBuilderSet)[slave.name]
-
-        # XXX these DispatchResult classes are badly named and do the
-        # same thing.  We need to fix that.
-        self.logger.debug(
-            '%s response for "%s": %s' % (slave, method, response))
-
-        if isinstance(response, Failure):
-            self.logger.warn(
-                '%s communication failed (%s)' %
-                (slave, response.getErrorMessage()))
-            self.slaveConversationEnded()
-            self._incrementFailureCounts(builder)
-            return self.fail_result(slave)
-
-        if isinstance(response, list) and len(response) == 2:
-            if method in buildd_success_result_map:
-                expected_status = buildd_success_result_map.get(method)
-                status, info = response
-                if status == expected_status:
-                    self.callSlave(slave)
+        def status_updated(ignored):
+            # Commit the changes done while possibly rescuing jobs, to
+            # avoid holding table locks.
+            transaction.commit()
+
+            # See if we think there's an active build on the builder.
+            buildqueue = self.builder.getBuildQueue()
+
+            # Scan the slave and get the logtail, or collect the build if
+            # it's ready.  Yes, "updateBuild" is a bad name.
+            if buildqueue is not None:
+                return self.builder.updateBuild(buildqueue)
+
+        def build_updated(ignored):
+            # Commit changes done while updating the build, to avoid
+            # holding table locks.
+            transaction.commit()
+
+            # If the builder is in manual mode, don't dispatch anything.
+            if self.builder.manual:
+                self.logger.debug(
+                    '%s is in manual mode, not dispatching.' %
+                    self.builder.name)
+                return
+
+            # If the builder is marked unavailable, don't dispatch anything.
+            # Additionally, because builders can be removed from the pool at
+            # any time, we need to see if we think there was a build running
+            # on it before it was marked unavailable. In this case we reset
+            # the build thusly forcing it to get re-dispatched to another
+            # builder.
+
+            return self.builder.isAvailable().addCallback(got_available)
+
+        def got_available(available):
+            if not available:
+                job = self.builder.currentjob
+                if job is not None and not self.builder.builderok:
+                    self.logger.info(
+                        "%s was made unavailable, resetting attached "
+                        "job" % self.builder.name)
+                    job.reset()
+                    transaction.commit()
+                return
+
+            # See if there is a job we can dispatch to the builder slave.
+
+            d = self.builder.findAndStartJob()
+            def job_started(candidate):
+                if self.builder.currentjob is not None:
+                    # After a successful dispatch we can reset the
+                    # failure_count.
+                    self.builder.resetFailureCount()
+                    transaction.commit()
+                    return self.builder.slave
+                else:
                     return None
-            else:
-                info = 'Unknown slave method: %s' % method
-        else:
-            info = 'Unexpected response: %s' % repr(response)
-
-        self.logger.error(
-            '%s failed to dispatch (%s)' % (slave, info))
-
-        self.slaveConversationEnded()
-        self._incrementFailureCounts(builder)
-        return self.fail_result(slave, info)
+            return d.addCallback(job_started)
+
+        d.addCallback(status_updated)
+        d.addCallback(build_updated)
+        return d
 
 
 class NewBuildersScanner:
@@ -578,15 +294,21 @@
         self.current_builders = [
             builder.name for builder in getUtility(IBuilderSet)]
 
+    def stop(self):
+        """Terminate the LoopingCall."""
+        self.loop.stop()
+
     def scheduleScan(self):
         """Schedule a callback SCAN_INTERVAL seconds later."""
-        return self._clock.callLater(self.SCAN_INTERVAL, self.scan)
+        self.loop = LoopingCall(self.scan)
+        self.loop.clock = self._clock
+        self.stopping_deferred = self.loop.start(self.SCAN_INTERVAL)
+        return self.stopping_deferred
 
     def scan(self):
         """If a new builder appears, create a SlaveScanner for it."""
         new_builders = self.checkForNewBuilders()
         self.manager.addScanForBuilders(new_builders)
-        self.scheduleScan()
 
     def checkForNewBuilders(self):
         """See if any new builders were added."""
@@ -609,10 +331,7 @@
             manager=self, clock=clock)
 
     def _setupLogger(self):
-        """Setup a 'slave-scanner' logger that redirects to twisted.
-
-        It is going to be used locally and within the thread running
-        the scan() method.
+        """Set up a 'slave-scanner' logger that redirects to twisted.
 
         Make it less verbose to avoid messing too much with the old code.
         """
@@ -643,12 +362,29 @@
         # Events will now fire in the SlaveScanner objects to scan each
         # builder.
 
+    def stopService(self):
+        """Callback for when we need to shut down."""
+        # XXX: lacks unit tests
+        # All the SlaveScanner objects need to be halted gracefully.
+        deferreds = [slave.stopping_deferred for slave in self.builder_slaves]
+        deferreds.append(self.new_builders_scanner.stopping_deferred)
+
+        self.new_builders_scanner.stop()
+        for slave in self.builder_slaves:
+            slave.stopCycle()
+
+        # The 'stopping_deferred's are called back when the loops are
+        # stopped, so we can wait on them all at once here before
+        # exiting.
+        d = defer.DeferredList(deferreds, consumeErrors=True)
+        return d
+
     def addScanForBuilders(self, builders):
         """Set up scanner objects for the builders specified."""
         for builder in builders:
             slave_scanner = SlaveScanner(builder, self.logger)
             self.builder_slaves.append(slave_scanner)
-            slave_scanner.scheduleNextScanCycle()
+            slave_scanner.startCycle()
 
         # Return the slave list for the benefit of tests.
         return self.builder_slaves

=== modified file 'lib/lp/buildmaster/model/builder.py'
--- lib/lp/buildmaster/model/builder.py	2010-09-24 13:39:27 +0000
+++ lib/lp/buildmaster/model/builder.py	2010-10-27 02:13:03 +0000
@@ -13,12 +13,11 @@
     ]
 
 import gzip
-import httplib
 import logging
 import os
 import socket
-import subprocess
 import tempfile
+import transaction
 import urllib2
 import xmlrpclib
 
@@ -34,6 +33,13 @@
     Count,
     Sum,
     )
+
+from twisted.internet import (
+    defer,
+    reactor as default_reactor,
+    )
+from twisted.web import xmlrpc
+
 from zope.component import getUtility
 from zope.interface import implements
 
@@ -58,7 +64,6 @@
 from lp.buildmaster.interfaces.builder import (
     BuildDaemonError,
     BuildSlaveFailure,
-    CannotBuild,
     CannotFetchFile,
     CannotResumeHost,
     CorruptBuildCookie,
@@ -66,9 +71,6 @@
     IBuilderSet,
     )
 from lp.buildmaster.interfaces.buildfarmjob import IBuildFarmJobSet
-from lp.buildmaster.interfaces.buildfarmjobbehavior import (
-    BuildBehaviorMismatch,
-    )
 from lp.buildmaster.interfaces.buildqueue import IBuildQueueSet
 from lp.buildmaster.model.buildfarmjobbehavior import IdleBuildBehavior
 from lp.buildmaster.model.buildqueue import (
@@ -78,9 +80,9 @@
 from lp.registry.interfaces.person import validate_public_person
 from lp.services.job.interfaces.job import JobStatus
 from lp.services.job.model.job import Job
-from lp.services.osutils import until_no_eintr
 from lp.services.propertycache import cachedproperty
-from lp.services.twistedsupport.xmlrpc import BlockingProxy
+from lp.services.twistedsupport.processmonitor import ProcessWithTimeout
+from lp.services.twistedsupport import cancel_on_timeout
 # XXX Michael Nelson 2010-01-13 bug=491330
 # These dependencies on soyuz will be removed when getBuildRecords()
 # is moved.
@@ -92,25 +94,9 @@
 from lp.soyuz.model.processor import Processor
 
 
-class TimeoutHTTPConnection(httplib.HTTPConnection):
-
-    def connect(self):
-        """Override the standard connect() methods to set a timeout"""
-        ret = httplib.HTTPConnection.connect(self)
-        self.sock.settimeout(config.builddmaster.socket_timeout)
-        return ret
-
-
-class TimeoutHTTP(httplib.HTTP):
-    _connection_class = TimeoutHTTPConnection
-
-
-class TimeoutTransport(xmlrpclib.Transport):
-    """XMLRPC Transport to setup a socket with defined timeout"""
-
-    def make_connection(self, host):
-        host, extra_headers, x509 = self.get_host_info(host)
-        return TimeoutHTTP(host)
+class QuietQueryFactory(xmlrpc._QueryFactory):
+    """XMLRPC client factory that doesn't splatter the log with junk."""
+    noisy = False
 
 
 class BuilderSlave(object):
@@ -125,24 +111,7 @@
     # many false positives in your test run and will most likely break
     # production.
 
-    # XXX: This (BuilderSlave) should use composition, rather than
-    # inheritance.
-
-    # XXX: Have a documented interface for the XML-RPC server:
-    #  - what methods
-    #  - what return values expected
-    #  - what faults
-    #  (see XMLRPCBuildDSlave in lib/canonical/buildd/slave.py).
-
-    # XXX: Arguably, this interface should be asynchronous
-    # (i.e. Deferred-returning). This would mean that Builder (see below)
-    # would have to expect Deferreds.
-
-    # XXX: Once we have a client object with a defined, tested interface, we
-    # should make a test double that doesn't do any XML-RPC and can be used to
-    # make testing easier & tests faster.
-
-    def __init__(self, proxy, builder_url, vm_host):
+    def __init__(self, proxy, builder_url, vm_host, reactor=None):
         """Initialize a BuilderSlave.
 
         :param proxy: An XML-RPC proxy, implementing 'callRemote'. It must
@@ -155,63 +124,87 @@
         self._file_cache_url = urlappend(builder_url, 'filecache')
         self._server = proxy
 
+        if reactor is None:
+            self.reactor = default_reactor
+        else:
+            self.reactor = reactor
+
     @classmethod
-    def makeBlockingSlave(cls, builder_url, vm_host):
-        rpc_url = urlappend(builder_url, 'rpc')
-        server_proxy = xmlrpclib.ServerProxy(
-            rpc_url, transport=TimeoutTransport(), allow_none=True)
-        return cls(BlockingProxy(server_proxy), builder_url, vm_host)
+    def makeBuilderSlave(cls, builder_url, vm_host, reactor=None, proxy=None):
+        """Create and return a `BuilderSlave`.
+
+        :param builder_url: The URL of the slave buildd machine,
+            e.g. http://localhost:8221
+        :param vm_host: If the slave is virtual, specify its host machine here.
+        :param reactor: Used by tests to override the Twisted reactor.
+        :param proxy: Used by tests to override the xmlrpc.Proxy.
+        """
+        rpc_url = urlappend(builder_url.encode('utf-8'), 'rpc')
+        if proxy is None:
+            server_proxy = xmlrpc.Proxy(rpc_url, allowNone=True)
+            server_proxy.queryFactory = QuietQueryFactory
+        else:
+            server_proxy = proxy
+        return cls(server_proxy, builder_url, vm_host, reactor)
+
+    def _with_timeout(self, d):
+        TIMEOUT = config.builddmaster.socket_timeout
+        return cancel_on_timeout(d, TIMEOUT, self.reactor)
 
     def abort(self):
         """Abort the current build."""
-        return self._server.callRemote('abort')
+        return self._with_timeout(self._server.callRemote('abort'))
 
     def clean(self):
         """Clean up the waiting files and reset the slave's internal state."""
-        return self._server.callRemote('clean')
+        return self._with_timeout(self._server.callRemote('clean'))
 
     def echo(self, *args):
         """Echo the arguments back."""
-        return self._server.callRemote('echo', *args)
+        return self._with_timeout(self._server.callRemote('echo', *args))
 
     def info(self):
         """Return the protocol version and the builder methods supported."""
-        return self._server.callRemote('info')
+        return self._with_timeout(self._server.callRemote('info'))
 
     def status(self):
         """Return the status of the build daemon."""
-        return self._server.callRemote('status')
+        return self._with_timeout(self._server.callRemote('status'))
 
     def ensurepresent(self, sha1sum, url, username, password):
+        # XXX: Nothing external calls this. Make it private.
         """Attempt to ensure the given file is present."""
-        return self._server.callRemote(
-            'ensurepresent', sha1sum, url, username, password)
+        return self._with_timeout(self._server.callRemote(
+            'ensurepresent', sha1sum, url, username, password))
 
     def getFile(self, sha_sum):
         """Construct a file-like object to return the named file."""
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         file_url = urlappend(self._file_cache_url, sha_sum)
         return urllib2.urlopen(file_url)
 
-    def resume(self):
-        """Resume a virtual builder.
-
-        It uses the configuration command-line (replacing 'vm_host') and
-        return its output.
-
-        :return: a (stdout, stderr, subprocess exitcode) triple
+    def resume(self, clock=None):
+        """Resume the builder in an asynchronous fashion.
+
+        We use the builddmaster configuration 'socket_timeout' as
+        the process timeout.
+
+        :param clock: An optional twisted.internet.task.Clock to override
+                      the default clock.  For use in tests.
+
+        :return: a Deferred that returns a
+            (stdout, stderr, subprocess exitcode) triple
         """
-        # XXX: This executes the vm_resume_command
-        # synchronously. RecordingSlave does so asynchronously. Since we
-        # always want to do this asynchronously, there's no need for the
-        # duplication.
         resume_command = config.builddmaster.vm_resume_command % {
             'vm_host': self._vm_host}
-        resume_argv = resume_command.split()
-        resume_process = subprocess.Popen(
-            resume_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        stdout, stderr = resume_process.communicate()
-
-        return (stdout, stderr, resume_process.returncode)
+        # Twisted API requires string but the configuration provides unicode.
+        resume_argv = [term.encode('utf-8') for term in resume_command.split()]
+        d = defer.Deferred()
+        p = ProcessWithTimeout(
+            d, config.builddmaster.socket_timeout, clock=clock)
+        p.spawnProcess(resume_argv[0], tuple(resume_argv))
+        return d
 
     def cacheFile(self, logger, libraryfilealias):
         """Make sure that the file at 'libraryfilealias' is on the slave.
@@ -224,13 +217,15 @@
             "Asking builder on %s to ensure it has file %s (%s, %s)" % (
                 self._file_cache_url, libraryfilealias.filename, url,
                 libraryfilealias.content.sha1))
-        self.sendFileToSlave(libraryfilealias.content.sha1, url)
+        return self.sendFileToSlave(libraryfilealias.content.sha1, url)
 
     def sendFileToSlave(self, sha1, url, username="", password=""):
         """Helper to send the file at 'url' with 'sha1' to this builder."""
-        present, info = self.ensurepresent(sha1, url, username, password)
-        if not present:
-            raise CannotFetchFile(url, info)
+        d = self.ensurepresent(sha1, url, username, password)
+        def check_present((present, info)):
+            if not present:
+                raise CannotFetchFile(url, info)
+        return d.addCallback(check_present)
 
     def build(self, buildid, builder_type, chroot_sha1, filemap, args):
         """Build a thing on this build slave.
@@ -243,19 +238,18 @@
         :param args: A dictionary of extra arguments. The contents depend on
             the build job type.
         """
-        try:
-            return self._server.callRemote(
-                'build', buildid, builder_type, chroot_sha1, filemap, args)
-        except xmlrpclib.Fault, info:
-            raise BuildSlaveFailure(info)
+        d = self._with_timeout(self._server.callRemote(
+            'build', buildid, builder_type, chroot_sha1, filemap, args))
+        def got_fault(failure):
+            failure.trap(xmlrpclib.Fault)
+            raise BuildSlaveFailure(failure.value)
+        return d.addErrback(got_fault)
 
 
 # This is a separate function since MockBuilder needs to use it too.
 # Do not use it -- (Mock)Builder.rescueIfLost should be used instead.
 def rescueBuilderIfLost(builder, logger=None):
     """See `IBuilder`."""
-    status_sentence = builder.slaveStatusSentence()
-
     # 'ident_position' dict relates the position of the job identifier
     # token in the sentence received from status(), according the
     # two status we care about. See see lib/canonical/buildd/slave.py
@@ -265,61 +259,58 @@
         'BuilderStatus.WAITING': 2
         }
 
-    # Isolate the BuilderStatus string, always the first token in
-    # see lib/canonical/buildd/slave.py and
-    # IBuilder.slaveStatusSentence().
-    status = status_sentence[0]
-
-    # If the cookie test below fails, it will request an abort of the
-    # builder.  This will leave the builder in the aborted state and
-    # with no assigned job, and we should now "clean" the slave which
-    # will reset its state back to IDLE, ready to accept new builds.
-    # This situation is usually caused by a temporary loss of
-    # communications with the slave and the build manager had to reset
-    # the job.
-    if status == 'BuilderStatus.ABORTED' and builder.currentjob is None:
-        builder.cleanSlave()
-        if logger is not None:
-            logger.info(
-                "Builder '%s' cleaned up from ABORTED" % builder.name)
-        return
-
-    # If slave is not building nor waiting, it's not in need of rescuing.
-    if status not in ident_position.keys():
-        return
-
-    slave_build_id = status_sentence[ident_position[status]]
-
-    try:
-        builder.verifySlaveBuildCookie(slave_build_id)
-    except CorruptBuildCookie, reason:
-        if status == 'BuilderStatus.WAITING':
-            builder.cleanSlave()
+    d = builder.slaveStatusSentence()
+
+    def got_status(status_sentence):
+        """After we get the status, clean if we have to.
+
+        Always return status_sentence.
+        """
+        # Isolate the BuilderStatus string, always the first token in
+        # see lib/canonical/buildd/slave.py and
+        # IBuilder.slaveStatusSentence().
+        status = status_sentence[0]
+
+        # If the cookie test below fails, it will request an abort of the
+        # builder.  This will leave the builder in the aborted state and
+        # with no assigned job, and we should now "clean" the slave which
+        # will reset its state back to IDLE, ready to accept new builds.
+        # This situation is usually caused by a temporary loss of
+        # communications with the slave and the build manager had to reset
+        # the job.
+        if status == 'BuilderStatus.ABORTED' and builder.currentjob is None:
+            if logger is not None:
+                logger.info(
+                    "Builder '%s' being cleaned up from ABORTED" %
+                    (builder.name,))
+            d = builder.cleanSlave()
+            return d.addCallback(lambda ignored: status_sentence)
         else:
-            builder.requestAbort()
-        if logger:
-            logger.info(
-                "Builder '%s' rescued from '%s': '%s'" %
-                (builder.name, slave_build_id, reason))
-
-
-def _update_builder_status(builder, logger=None):
-    """Really update the builder status."""
-    try:
-        builder.checkSlaveAlive()
-        builder.rescueIfLost(logger)
-    # Catch only known exceptions.
-    # XXX cprov 2007-06-15 bug=120571: ValueError & TypeError catching is
-    # disturbing in this context. We should spend sometime sanitizing the
-    # exceptions raised in the Builder API since we already started the
-    # main refactoring of this area.
-    except (ValueError, TypeError, xmlrpclib.Fault,
-            BuildDaemonError), reason:
-        builder.failBuilder(str(reason))
-        if logger:
-            logger.warn(
-                "%s (%s) marked as failed due to: %s",
-                builder.name, builder.url, builder.failnotes, exc_info=True)
+            return status_sentence
+
+    def rescue_slave(status_sentence):
+        # If slave is not building nor waiting, it's not in need of rescuing.
+        status = status_sentence[0]
+        if status not in ident_position.keys():
+            return
+        slave_build_id = status_sentence[ident_position[status]]
+        try:
+            builder.verifySlaveBuildCookie(slave_build_id)
+        except CorruptBuildCookie, reason:
+            if status == 'BuilderStatus.WAITING':
+                d = builder.cleanSlave()
+            else:
+                d = builder.requestAbort()
+            def log_rescue(ignored):
+                if logger:
+                    logger.info(
+                        "Builder '%s' rescued from '%s': '%s'" %
+                        (builder.name, slave_build_id, reason))
+            return d.addCallback(log_rescue)
+
+    d.addCallback(got_status)
+    d.addCallback(rescue_slave)
+    return d
 
 
 def updateBuilderStatus(builder, logger=None):
@@ -327,16 +318,7 @@
     if logger:
         logger.debug('Checking %s' % builder.name)
 
-    MAX_EINTR_RETRIES = 42 # pulling a number out of my a$$ here
-    try:
-        return until_no_eintr(
-            MAX_EINTR_RETRIES, _update_builder_status, builder, logger=logger)
-    except socket.error, reason:
-        # In Python 2.6 we can use IOError instead.  It also has
-        # reason.errno but we might be using 2.5 here so use the
-        # index hack.
-        error_message = str(reason)
-        builder.handleTimeout(logger, error_message)
+    return builder.rescueIfLost(logger)
 
 
 class Builder(SQLBase):
@@ -364,6 +346,10 @@
     active = BoolCol(dbName='active', notNull=True, default=True)
     failure_count = IntCol(dbName='failure_count', default=0, notNull=True)
 
+    # The number of times a builder can consecutively fail before we
+    # give up and mark it builderok=False.
+    FAILURE_THRESHOLD = 5
+
     def _getCurrentBuildBehavior(self):
         """Return the current build behavior."""
         if not safe_hasattr(self, '_current_build_behavior'):
@@ -409,18 +395,13 @@
         """See `IBuilder`."""
         self.failure_count = 0
 
-    def checkSlaveAlive(self):
-        """See IBuilder."""
-        if self.slave.echo("Test")[0] != "Test":
-            raise BuildDaemonError("Failed to echo OK")
-
     def rescueIfLost(self, logger=None):
         """See `IBuilder`."""
-        rescueBuilderIfLost(self, logger)
+        return rescueBuilderIfLost(self, logger)
 
     def updateStatus(self, logger=None):
         """See `IBuilder`."""
-        updateBuilderStatus(self, logger)
+        return updateBuilderStatus(self, logger)
 
     def cleanSlave(self):
         """See IBuilder."""
@@ -440,20 +421,23 @@
     def resumeSlaveHost(self):
         """See IBuilder."""
         if not self.virtualized:
-            raise CannotResumeHost('Builder is not virtualized.')
+            return defer.fail(CannotResumeHost('Builder is not virtualized.'))
 
         if not self.vm_host:
-            raise CannotResumeHost('Undefined vm_host.')
+            return defer.fail(CannotResumeHost('Undefined vm_host.'))
 
         logger = self._getSlaveScannerLogger()
         logger.debug("Resuming %s (%s)" % (self.name, self.url))
 
-        stdout, stderr, returncode = self.slave.resume()
-        if returncode != 0:
+        d = self.slave.resume()
+        def got_resume_ok((stdout, stderr, returncode)):
+            return stdout, stderr
+        def got_resume_bad(failure):
+            stdout, stderr, code = failure.value
             raise CannotResumeHost(
                 "Resuming failed:\nOUT:\n%s\nERR:\n%s\n" % (stdout, stderr))
 
-        return stdout, stderr
+        return d.addCallback(got_resume_ok).addErrback(got_resume_bad)
 
     @cachedproperty
     def slave(self):
@@ -462,7 +446,7 @@
         # the slave object, which is usually an XMLRPC client, with a
         # stub object that removes the need to actually create a buildd
         # slave in various states - which can be hard to create.
-        return BuilderSlave.makeBlockingSlave(self.url, self.vm_host)
+        return BuilderSlave.makeBuilderSlave(self.url, self.vm_host)
 
     def setSlaveForTesting(self, proxy):
         """See IBuilder."""
@@ -483,18 +467,23 @@
 
         # If we are building a virtual build, resume the virtual machine.
         if self.virtualized:
-            self.resumeSlaveHost()
+            d = self.resumeSlaveHost()
+        else:
+            d = defer.succeed(None)
 
-        # Do it.
-        build_queue_item.markAsBuilding(self)
-        try:
-            self.current_build_behavior.dispatchBuildToSlave(
+        def resume_done(ignored):
+            return self.current_build_behavior.dispatchBuildToSlave(
                 build_queue_item.id, logger)
-        except BuildSlaveFailure, e:
-            logger.debug("Disabling builder: %s" % self.url, exc_info=1)
+
+        def eb_slave_failure(failure):
+            failure.trap(BuildSlaveFailure)
+            e = failure.value
             self.failBuilder(
                 "Exception (%s) when setting up to new job" % (e,))
-        except CannotFetchFile, e:
+
+        def eb_cannot_fetch_file(failure):
+            failure.trap(CannotFetchFile)
+            e = failure.value
             message = """Slave '%s' (%s) was unable to fetch file.
             ****** URL ********
             %s
@@ -503,10 +492,19 @@
             *******************
             """ % (self.name, self.url, e.file_url, e.error_information)
             raise BuildDaemonError(message)
-        except socket.error, e:
+
+        def eb_socket_error(failure):
+            failure.trap(socket.error)
+            e = failure.value
             error_message = "Exception (%s) when setting up new job" % (e,)
-            self.handleTimeout(logger, error_message)
-            raise BuildSlaveFailure
+            d = self.handleTimeout(logger, error_message)
+            return d.addBoth(lambda ignored: failure)
+
+        d.addCallback(resume_done)
+        d.addErrback(eb_slave_failure)
+        d.addErrback(eb_cannot_fetch_file)
+        d.addErrback(eb_socket_error)
+        return d
 
     def failBuilder(self, reason):
         """See IBuilder"""
@@ -534,22 +532,24 @@
 
     def slaveStatus(self):
         """See IBuilder."""
-        builder_version, builder_arch, mechanisms = self.slave.info()
-        status_sentence = self.slave.status()
-
-        status = {'builder_status': status_sentence[0]}
-
-        # Extract detailed status and log information if present.
-        # Although build_id is also easily extractable here, there is no
-        # valid reason for anything to use it, so we exclude it.
-        if status['builder_status'] == 'BuilderStatus.WAITING':
-            status['build_status'] = status_sentence[1]
-        else:
-            if status['builder_status'] == 'BuilderStatus.BUILDING':
-                status['logtail'] = status_sentence[2]
-
-        self.current_build_behavior.updateSlaveStatus(status_sentence, status)
-        return status
+        d = self.slave.status()
+        def got_status(status_sentence):
+            status = {'builder_status': status_sentence[0]}
+
+            # Extract detailed status and log information if present.
+            # Although build_id is also easily extractable here, there is no
+            # valid reason for anything to use it, so we exclude it.
+            if status['builder_status'] == 'BuilderStatus.WAITING':
+                status['build_status'] = status_sentence[1]
+            else:
+                if status['builder_status'] == 'BuilderStatus.BUILDING':
+                    status['logtail'] = status_sentence[2]
+
+            self.current_build_behavior.updateSlaveStatus(
+                status_sentence, status)
+            return status
+
+        return d.addCallback(got_status)
 
     def slaveStatusSentence(self):
         """See IBuilder."""
@@ -562,13 +562,15 @@
 
     def updateBuild(self, queueItem):
         """See `IBuilder`."""
-        self.current_build_behavior.updateBuild(queueItem)
+        return self.current_build_behavior.updateBuild(queueItem)
 
     def transferSlaveFileToLibrarian(self, file_sha1, filename, private):
         """See IBuilder."""
         out_file_fd, out_file_name = tempfile.mkstemp(suffix=".buildlog")
         out_file = os.fdopen(out_file_fd, "r+")
         try:
+            # XXX 2010-10-18 bug=662631
+            # Change this to do non-blocking IO.
             slave_file = self.slave.getFile(file_sha1)
             copy_and_close(slave_file, out_file)
             # If the requested file is the 'buildlog' compress it using gzip
@@ -599,18 +601,17 @@
 
         return library_file.id
 
-    @property
-    def is_available(self):
+    def isAvailable(self):
         """See `IBuilder`."""
         if not self.builderok:
-            return False
-        try:
-            slavestatus = self.slaveStatusSentence()
-        except (xmlrpclib.Fault, socket.error):
-            return False
-        if slavestatus[0] != BuilderStatus.IDLE:
-            return False
-        return True
+            return defer.succeed(False)
+        d = self.slaveStatusSentence()
+        def catch_fault(failure):
+            failure.trap(xmlrpclib.Fault, socket.error)
+            return False
+        def check_available(status):
+            return status[0] == BuilderStatus.IDLE
+        return d.addCallbacks(check_available, catch_fault)
 
     def _getSlaveScannerLogger(self):
         """Return the logger instance from buildd-slave-scanner.py."""
@@ -621,6 +622,27 @@
         logger = logging.getLogger('slave-scanner')
         return logger
 
+    def acquireBuildCandidate(self):
+        """Acquire a build candidate in an atomic fashion.
+
+        When retrieving a candidate we need to mark it as building
+        immediately so that it is not dispatched by another builder in the
+        build manager.
+
+        We can consider this to be atomic because although the build manager
+        is a Twisted app and gives the appearance of doing lots of things at
+        once, it's still single-threaded so no more than one builder scan
+        can be in this code at the same time.
+
+        If there's ever more than one build manager running at once, then
+        this code will need some sort of mutex.
+        """
+        candidate = self._findBuildCandidate()
+        if candidate is not None:
+            candidate.markAsBuilding(self)
+            transaction.commit()
+        return candidate
+
     def _findBuildCandidate(self):
         """Find a candidate job for dispatch to an idle buildd slave.
 
@@ -700,52 +722,46 @@
         :param candidate: The job to dispatch.
         """
         logger = self._getSlaveScannerLogger()
-        try:
-            self.startBuild(candidate, logger)
-        except (BuildSlaveFailure, CannotBuild, BuildBehaviorMismatch), err:
-            logger.warn('Could not build: %s' % err)
+        # Using maybeDeferred ensures that any exceptions are also
+        # wrapped up and caught later.
+        d = defer.maybeDeferred(self.startBuild, candidate, logger)
+        return d
 
     def handleTimeout(self, logger, error_message):
         """See IBuilder."""
-        builder_should_be_failed = True
-
         if self.virtualized:
             # Virtualized/PPA builder: attempt a reset.
             logger.warn(
                 "Resetting builder: %s -- %s" % (self.url, error_message),
                 exc_info=True)
-            try:
-                self.resumeSlaveHost()
-            except CannotResumeHost, err:
-                # Failed to reset builder.
-                logger.warn(
-                    "Failed to reset builder: %s -- %s" %
-                    (self.url, str(err)), exc_info=True)
-            else:
-                # Builder was reset, do *not* mark it as failed.
-                builder_should_be_failed = False
-
-        if builder_should_be_failed:
+            d = self.resumeSlaveHost()
+            return d
+        else:
+            # XXX: This should really let the failure bubble up to the
+            # scan() method that does the failure counting.
             # Mark builder as 'failed'.
             logger.warn(
-                "Disabling builder: %s -- %s" % (self.url, error_message),
-                exc_info=True)
+                "Disabling builder: %s -- %s" % (self.url, error_message))
             self.failBuilder(error_message)
+            return defer.succeed(None)
 
     def findAndStartJob(self, buildd_slave=None):
         """See IBuilder."""
+        # XXX This method should be removed in favour of two separately
+        # called methods that find and dispatch the job.  It will
+        # require a lot of test fixing.
         logger = self._getSlaveScannerLogger()
-        candidate = self._findBuildCandidate()
+        candidate = self.acquireBuildCandidate()
 
         if candidate is None:
             logger.debug("No build candidates available for builder.")
-            return None
+            return defer.succeed(None)
 
         if buildd_slave is not None:
             self.setSlaveForTesting(buildd_slave)
 
-        self._dispatchBuildCandidate(candidate)
-        return candidate
+        d = self._dispatchBuildCandidate(candidate)
+        return d.addCallback(lambda ignored: candidate)
 
     def getBuildQueue(self):
         """See `IBuilder`."""

=== modified file 'lib/lp/buildmaster/model/buildfarmjobbehavior.py'
--- lib/lp/buildmaster/model/buildfarmjobbehavior.py	2010-08-20 20:31:18 +0000
+++ lib/lp/buildmaster/model/buildfarmjobbehavior.py	2010-10-27 02:13:03 +0000
@@ -16,13 +16,18 @@
 import socket
 import xmlrpclib
 
+from twisted.internet import defer
+
 from zope.component import getUtility
 from zope.interface import implements
 from zope.security.proxy import removeSecurityProxy
 
 from canonical import encoding
 from canonical.librarian.interfaces import ILibrarianClient
-from lp.buildmaster.interfaces.builder import CorruptBuildCookie
+from lp.buildmaster.interfaces.builder import (
+    BuildSlaveFailure,
+    CorruptBuildCookie,
+    )
 from lp.buildmaster.interfaces.buildfarmjobbehavior import (
     BuildBehaviorMismatch,
     IBuildFarmJobBehavior,
@@ -69,54 +74,53 @@
         """See `IBuildFarmJobBehavior`."""
         logger = logging.getLogger('slave-scanner')
 
-        try:
-            slave_status = self._builder.slaveStatus()
-        except (xmlrpclib.Fault, socket.error), info:
-            # XXX cprov 2005-06-29:
-            # Hmm, a problem with the xmlrpc interface,
-            # disable the builder ?? or simple notice the failure
-            # with a timestamp.
+        d = self._builder.slaveStatus()
+
+        def got_failure(failure):
+            failure.trap(xmlrpclib.Fault, socket.error)
+            info = failure.value
             info = ("Could not contact the builder %s, caught a (%s)"
                     % (queueItem.builder.url, info))
-            logger.debug(info, exc_info=True)
-            # keep the job for scan
-            return
-
-        builder_status_handlers = {
-            'BuilderStatus.IDLE': self.updateBuild_IDLE,
-            'BuilderStatus.BUILDING': self.updateBuild_BUILDING,
-            'BuilderStatus.ABORTING': self.updateBuild_ABORTING,
-            'BuilderStatus.ABORTED': self.updateBuild_ABORTED,
-            'BuilderStatus.WAITING': self.updateBuild_WAITING,
-            }
-
-        builder_status = slave_status['builder_status']
-        if builder_status not in builder_status_handlers:
-            logger.critical(
-                "Builder on %s returned unknown status %s, failing it"
-                % (self._builder.url, builder_status))
-            self._builder.failBuilder(
-                "Unknown status code (%s) returned from status() probe."
-                % builder_status)
-            # XXX: This will leave the build and job in a bad state, but
-            # should never be possible, since our builder statuses are
-            # known.
-            queueItem._builder = None
-            queueItem.setDateStarted(None)
-            return
-
-        # Since logtail is a xmlrpclib.Binary container and it is returned
-        # from the IBuilder content class, it arrives protected by a Zope
-        # Security Proxy, which is not declared, thus empty. Before passing
-        # it to the status handlers we will simply remove the proxy.
-        logtail = removeSecurityProxy(slave_status.get('logtail'))
-
-        method = builder_status_handlers[builder_status]
-        try:
-            method(queueItem, slave_status, logtail, logger)
-        except TypeError, e:
-            logger.critical("Received wrong number of args in response.")
-            logger.exception(e)
+            raise BuildSlaveFailure(info)
+
+        def got_status(slave_status):
+            builder_status_handlers = {
+                'BuilderStatus.IDLE': self.updateBuild_IDLE,
+                'BuilderStatus.BUILDING': self.updateBuild_BUILDING,
+                'BuilderStatus.ABORTING': self.updateBuild_ABORTING,
+                'BuilderStatus.ABORTED': self.updateBuild_ABORTED,
+                'BuilderStatus.WAITING': self.updateBuild_WAITING,
+                }
+
+            builder_status = slave_status['builder_status']
+            if builder_status not in builder_status_handlers:
+                logger.critical(
+                    "Builder on %s returned unknown status %s, failing it"
+                    % (self._builder.url, builder_status))
+                self._builder.failBuilder(
+                    "Unknown status code (%s) returned from status() probe."
+                    % builder_status)
+                # XXX: This will leave the build and job in a bad state, but
+                # should never be possible, since our builder statuses are
+                # known.
+                queueItem._builder = None
+                queueItem.setDateStarted(None)
+                return
+
+            # Since logtail is a xmlrpclib.Binary container and it is
+            # returned from the IBuilder content class, it arrives
+            # protected by a Zope Security Proxy, which is not declared,
+            # thus empty. Before passing it to the status handlers we
+            # will simply remove the proxy.
+            logtail = removeSecurityProxy(slave_status.get('logtail'))
+
+            method = builder_status_handlers[builder_status]
+            return defer.maybeDeferred(
+                method, queueItem, slave_status, logtail, logger)
+
+        d.addErrback(got_failure)
+        d.addCallback(got_status)
+        return d
 
     def updateBuild_IDLE(self, queueItem, slave_status, logtail, logger):
         """Somehow the builder forgot about the build job.
@@ -146,11 +150,13 @@
 
         Clean the builder for another jobs.
         """
-        queueItem.builder.cleanSlave()
-        queueItem.builder = None
-        if queueItem.job.status != JobStatus.FAILED:
-            queueItem.job.fail()
-        queueItem.specific_job.jobAborted()
+        d = queueItem.builder.cleanSlave()
+        def got_cleaned(ignored):
+            queueItem.builder = None
+            if queueItem.job.status != JobStatus.FAILED:
+                queueItem.job.fail()
+            queueItem.specific_job.jobAborted()
+        return d.addCallback(got_cleaned)
 
     def extractBuildStatus(self, slave_status):
         """Read build status name.
@@ -185,6 +191,8 @@
         # XXX: dsilvers 2005-03-02: Confirm the builder has the right build?
 
         build = queueItem.specific_job.build
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         build.handleStatus(build_status, librarian, slave_status)
 
 

=== modified file 'lib/lp/buildmaster/model/packagebuild.py'
--- lib/lp/buildmaster/model/packagebuild.py	2010-10-02 11:41:43 +0000
+++ lib/lp/buildmaster/model/packagebuild.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'PackageBuild',
@@ -165,6 +163,8 @@
     def getLogFromSlave(package_build):
         """See `IPackageBuild`."""
         builder = package_build.buildqueue_record.builder
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         return builder.transferSlaveFileToLibrarian(
             SLAVE_LOG_FILENAME,
             package_build.buildqueue_record.getLogFileName(),
@@ -180,6 +180,8 @@
         # log, builder and date_finished are read-only, so we must
         # currently remove the security proxy to set them.
         naked_build = removeSecurityProxy(build)
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         naked_build.log = build.getLogFromSlave(build)
         naked_build.builder = build.buildqueue_record.builder
         # XXX cprov 20060615 bug=120584: Currently buildduration includes
@@ -276,6 +278,8 @@
             logger.critical("Unknown BuildStatus '%s' for builder '%s'"
                             % (status, self.buildqueue_record.builder.url))
             return
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         method(librarian, slave_status, logger)
 
     def _handleStatus_OK(self, librarian, slave_status, logger):

=== modified file 'lib/lp/buildmaster/tests/mock_slaves.py'
--- lib/lp/buildmaster/tests/mock_slaves.py	2010-09-23 12:35:21 +0000
+++ lib/lp/buildmaster/tests/mock_slaves.py	2010-10-27 02:13:03 +0000
@@ -6,21 +6,40 @@
 __metaclass__ = type
 
 __all__ = [
+    'AbortedSlave',
+    'AbortingSlave',
+    'BrokenSlave',
+    'BuildingSlave',
+    'CorruptBehavior',
+    'DeadProxy',
+    'LostBuildingBrokenSlave',
     'MockBuilder',
-    'LostBuildingBrokenSlave',
-    'BrokenSlave',
     'OkSlave',
-    'BuildingSlave',
-    'AbortedSlave',
+    'SlaveTestHelpers',
+    'TrivialBehavior',
     'WaitingSlave',
-    'AbortingSlave',
     ]
 
+import fixtures
+import os
+
 from StringIO import StringIO
 import xmlrpclib
 
-from lp.buildmaster.interfaces.builder import CannotFetchFile
+from testtools.content import Content
+from testtools.content_type import UTF8_TEXT
+
+from twisted.internet import defer
+from twisted.web import xmlrpc
+
+from canonical.buildd.tests.harness import BuilddSlaveTestSetup
+
+from lp.buildmaster.interfaces.builder import (
+    CannotFetchFile,
+    CorruptBuildCookie,
+    )
 from lp.buildmaster.model.builder import (
+    BuilderSlave,
     rescueBuilderIfLost,
     updateBuilderStatus,
     )
@@ -59,15 +78,9 @@
             slave_build_id)
 
     def cleanSlave(self):
-        # XXX: This should not print anything. The print is only here to make
-        # doc/builder.txt a meaningful test.
-        print 'Cleaning slave'
         return self.slave.clean()
 
     def requestAbort(self):
-        # XXX: This should not print anything. The print is only here to make
-        # doc/builder.txt a meaningful test.
-        print 'Aborting slave'
         return self.slave.abort()
 
     def resumeSlave(self, logger):
@@ -77,10 +90,10 @@
         pass
 
     def rescueIfLost(self, logger=None):
-        rescueBuilderIfLost(self, logger)
+        return rescueBuilderIfLost(self, logger)
 
     def updateStatus(self, logger=None):
-        updateBuilderStatus(self, logger)
+        return defer.maybeDeferred(updateBuilderStatus, self, logger)
 
 
 # XXX: It would be *really* nice to run some set of tests against the real
@@ -95,36 +108,44 @@
         self.arch_tag = arch_tag
 
     def status(self):
-        return ('BuilderStatus.IDLE', '')
+        return defer.succeed(('BuilderStatus.IDLE', ''))
 
     def ensurepresent(self, sha1, url, user=None, password=None):
         self.call_log.append(('ensurepresent', url, user, password))
-        return True, None
+        return defer.succeed((True, None))
 
     def build(self, buildid, buildtype, chroot, filemap, args):
         self.call_log.append(
             ('build', buildid, buildtype, chroot, filemap.keys(), args))
         info = 'OkSlave BUILDING'
-        return ('BuildStatus.Building', info)
+        return defer.succeed(('BuildStatus.Building', info))
 
     def echo(self, *args):
         self.call_log.append(('echo',) + args)
-        return args
+        return defer.succeed(args)
 
     def clean(self):
         self.call_log.append('clean')
+        return defer.succeed(None)
 
     def abort(self):
         self.call_log.append('abort')
+        return defer.succeed(None)
 
     def info(self):
         self.call_log.append('info')
-        return ('1.0', self.arch_tag, 'debian')
+        return defer.succeed(('1.0', self.arch_tag, 'debian'))
+
+    def resume(self):
+        self.call_log.append('resume')
+        return defer.succeed(("", "", 0))
 
     def sendFileToSlave(self, sha1, url, username="", password=""):
-        present, info = self.ensurepresent(sha1, url, username, password)
-        if not present:
-            raise CannotFetchFile(url, info)
+        d = self.ensurepresent(sha1, url, username, password)
+        def check_present((present, info)):
+            if not present:
+                raise CannotFetchFile(url, info)
+        return d.addCallback(check_present)
 
     def cacheFile(self, logger, libraryfilealias):
         return self.sendFileToSlave(
@@ -141,9 +162,11 @@
     def status(self):
         self.call_log.append('status')
         buildlog = xmlrpclib.Binary("This is a build log")
-        return ('BuilderStatus.BUILDING', self.build_id, buildlog)
+        return defer.succeed(
+            ('BuilderStatus.BUILDING', self.build_id, buildlog))
 
     def getFile(self, sum):
+        # XXX: This needs to be updated to return a Deferred.
         self.call_log.append('getFile')
         if sum == "buildlog":
             s = StringIO("This is a build log")
@@ -155,11 +178,15 @@
     """A mock slave that looks like it's currently waiting."""
 
     def __init__(self, state='BuildStatus.OK', dependencies=None,
-                 build_id='1-1'):
+                 build_id='1-1', filemap=None):
         super(WaitingSlave, self).__init__()
         self.state = state
         self.dependencies = dependencies
         self.build_id = build_id
+        if filemap is None:
+            self.filemap = {}
+        else:
+            self.filemap = filemap
 
         # By default, the slave only has a buildlog, but callsites
         # can update this list as needed.
@@ -167,10 +194,12 @@
 
     def status(self):
         self.call_log.append('status')
-        return ('BuilderStatus.WAITING', self.state, self.build_id, {},
-                self.dependencies)
+        return defer.succeed((
+            'BuilderStatus.WAITING', self.state, self.build_id, self.filemap,
+            self.dependencies))
 
     def getFile(self, hash):
+        # XXX: This needs to be updated to return a Deferred.
         self.call_log.append('getFile')
         if hash in self.valid_file_hashes:
             content = "This is a %s" % hash
@@ -184,15 +213,19 @@
 
     def status(self):
         self.call_log.append('status')
-        return ('BuilderStatus.ABORTING', '1-1')
+        return defer.succeed(('BuilderStatus.ABORTING', '1-1'))
 
 
 class AbortedSlave(OkSlave):
     """A mock slave that looks like it's aborted."""
 
+    def clean(self):
+        self.call_log.append('clean')
+        return defer.succeed(None)
+
     def status(self):
-        self.call_log.append('status')
-        return ('BuilderStatus.ABORTED', '1-1')
+        self.call_log.append('status')
+        return defer.succeed(('BuilderStatus.ABORTED', '1-1'))
 
 
 class LostBuildingBrokenSlave:
@@ -206,16 +239,108 @@
 
     def status(self):
         self.call_log.append('status')
-        return ('BuilderStatus.BUILDING', '1000-10000')
+        return defer.succeed(('BuilderStatus.BUILDING', '1000-10000'))
 
     def abort(self):
         self.call_log.append('abort')
-        raise xmlrpclib.Fault(8002, "Could not abort")
+        return defer.fail(xmlrpclib.Fault(8002, "Could not abort"))
 
 
 class BrokenSlave:
     """A mock slave that reports that it is broken."""
 
+    def __init__(self):
+        self.call_log = []
+
     def status(self):
         self.call_log.append('status')
-        raise xmlrpclib.Fault(8001, "Broken slave")
+        return defer.fail(xmlrpclib.Fault(8001, "Broken slave"))
+
+
+class CorruptBehavior:
+
+    def verifySlaveBuildCookie(self, cookie):
+        raise CorruptBuildCookie("Bad value: %r" % (cookie,))
+
+
+class TrivialBehavior:
+
+    def verifySlaveBuildCookie(self, cookie):
+        pass
+
+
+class DeadProxy(xmlrpc.Proxy):
+    """An xmlrpc.Proxy that doesn't actually send any messages.
+
+    Used when you want to test timeouts, for example.
+    """
+
+    def callRemote(self, *args, **kwargs):
+        return defer.Deferred()
+
+
+class SlaveTestHelpers(fixtures.Fixture):
+
+    # The URL for the XML-RPC service set up by `BuilddSlaveTestSetup`.
+    BASE_URL = 'http://localhost:8221'
+    TEST_URL = '%s/rpc/' % (BASE_URL,)
+
+    def getServerSlave(self):
+        """Set up a test build slave server.
+
+        :return: A `BuilddSlaveTestSetup` object.
+        """
+        tachandler = BuilddSlaveTestSetup()
+        tachandler.setUp()
+        # Basically impossible to do this w/ TrialTestCase. But it would be
+        # really nice to keep it.
+        #
+        # def addLogFile(exc_info):
+        #     self.addDetail(
+        #         'xmlrpc-log-file',
+        #         Content(UTF8_TEXT, lambda: open(tachandler.logfile, 'r').read()))
+        # self.addOnException(addLogFile)
+        self.addCleanup(tachandler.tearDown)
+        return tachandler
+
+    def getClientSlave(self, reactor=None, proxy=None):
+        """Return a `BuilderSlave` for use in testing.
+
+        Points to a fixed URL that is also used by `BuilddSlaveTestSetup`.
+        """
+        return BuilderSlave.makeBuilderSlave(
+            self.TEST_URL, 'vmhost', reactor, proxy)
+
+    def makeCacheFile(self, tachandler, filename):
+        """Make a cache file available on the remote slave.
+
+        :param tachandler: The TacTestSetup object used to start the remote
+            slave.
+        :param filename: The name of the file to create in the file cache
+            area.
+        """
+        path = os.path.join(tachandler.root, 'filecache', filename)
+        fd = open(path, 'w')
+        fd.write('something')
+        fd.close()
+        self.addCleanup(os.unlink, path)
+
+    def triggerGoodBuild(self, slave, build_id=None):
+        """Trigger a good build on 'slave'.
+
+        :param slave: A `BuilderSlave` instance to trigger the build on.
+        :param build_id: The build identifier. If not specified, defaults to
+            an arbitrary string.
+        :type build_id: str
+        :return: The build id returned by the slave.
+        """
+        if build_id is None:
+            build_id = 'random-build-id'
+        tachandler = self.getServerSlave()
+        chroot_file = 'fake-chroot'
+        dsc_file = 'thing'
+        self.makeCacheFile(tachandler, chroot_file)
+        self.makeCacheFile(tachandler, dsc_file)
+        return slave.build(
+            build_id, 'debian', chroot_file, {'.dsc': dsc_file},
+            {'ogrecomponent': 'main'})

=== modified file 'lib/lp/buildmaster/tests/test_builder.py'
--- lib/lp/buildmaster/tests/test_builder.py	2010-10-06 09:06:30 +0000
+++ lib/lp/buildmaster/tests/test_builder.py	2010-10-27 02:13:03 +0000
@@ -3,20 +3,24 @@
 
 """Test Builder features."""
 
-import errno
 import os
-import socket
+import signal
 import xmlrpclib
 
-from testtools.content import Content
-from testtools.content_type import UTF8_TEXT
+from twisted.web.client import getPage
+
+from twisted.internet.defer import CancelledError
+from twisted.internet.task import Clock
+from twisted.python.failure import Failure
+from twisted.trial.unittest import TestCase as TrialTestCase
 
 from zope.component import getUtility
 from zope.security.proxy import removeSecurityProxy
 
 from canonical.buildd.slave import BuilderStatus
-from canonical.buildd.tests.harness import BuilddSlaveTestSetup
+from canonical.config import config
 from canonical.database.sqlbase import flush_database_updates
+from canonical.launchpad.scripts import QuietFakeLogger
 from canonical.launchpad.webapp.interfaces import (
     DEFAULT_FLAVOR,
     IStoreSelector,
@@ -24,21 +28,38 @@
     )
 from canonical.testing.layers import (
     DatabaseFunctionalLayer,
-    LaunchpadZopelessLayer
+    LaunchpadZopelessLayer,
+    TwistedLaunchpadZopelessLayer,
+    TwistedLayer,
     )
 from lp.buildmaster.enums import BuildStatus
-from lp.buildmaster.interfaces.builder import IBuilder, IBuilderSet
+from lp.buildmaster.interfaces.builder import (
+    CannotFetchFile,
+    IBuilder,
+    IBuilderSet,
+    )
 from lp.buildmaster.interfaces.buildfarmjobbehavior import (
     IBuildFarmJobBehavior,
     )
 from lp.buildmaster.interfaces.buildqueue import IBuildQueueSet
-from lp.buildmaster.model.builder import BuilderSlave
+from lp.buildmaster.interfaces.builder import CannotResumeHost
 from lp.buildmaster.model.buildfarmjobbehavior import IdleBuildBehavior
 from lp.buildmaster.model.buildqueue import BuildQueue
 from lp.buildmaster.tests.mock_slaves import (
     AbortedSlave,
+    AbortingSlave,
+    BrokenSlave,
+    BuildingSlave,
+    CorruptBehavior,
+    DeadProxy,
+    LostBuildingBrokenSlave,
     MockBuilder,
+    OkSlave,
+    SlaveTestHelpers,
+    TrivialBehavior,
+    WaitingSlave,
     )
+from lp.services.job.interfaces.job import JobStatus
 from lp.soyuz.enums import (
     ArchivePurpose,
     PackagePublishingStatus,
@@ -49,9 +70,12 @@
     )
 from lp.soyuz.tests.test_publishing import SoyuzTestPublisher
 from lp.testing import (
-    TestCase,
+    ANONYMOUS,
+    login_as,
+    logout,
     TestCaseWithFactory,
     )
+from lp.testing.factory import LaunchpadObjectFactory
 from lp.testing.fakemethod import FakeMethod
 
 
@@ -92,42 +116,121 @@
         bq = builder.getBuildQueue()
         self.assertIs(None, bq)
 
-    def test_updateBuilderStatus_catches_repeated_EINTR(self):
-        # A single EINTR return from a socket operation should cause the
-        # operation to be retried, not fail/reset the builder.
-        builder = removeSecurityProxy(self.factory.makeBuilder())
-        builder.handleTimeout = FakeMethod()
-        builder.rescueIfLost = FakeMethod()
-
-        def _fake_checkSlaveAlive():
-            # Raise an EINTR error for all invocations.
-            raise socket.error(errno.EINTR, "fake eintr")
-
-        builder.checkSlaveAlive = _fake_checkSlaveAlive
-        builder.updateStatus()
-
-        # builder.updateStatus should eventually have called
-        # handleTimeout()
-        self.assertEqual(1, builder.handleTimeout.call_count)
-
-    def test_updateBuilderStatus_catches_single_EINTR(self):
-        builder = removeSecurityProxy(self.factory.makeBuilder())
-        builder.handleTimeout = FakeMethod()
-        builder.rescueIfLost = FakeMethod()
-        self.eintr_returned = False
-
-        def _fake_checkSlaveAlive():
-            # raise an EINTR error for the first invocation only.
-            if not self.eintr_returned:
-                self.eintr_returned = True
-                raise socket.error(errno.EINTR, "fake eintr")
-
-        builder.checkSlaveAlive = _fake_checkSlaveAlive
-        builder.updateStatus()
-
-        # builder.updateStatus should never call handleTimeout() for a
-        # single EINTR.
-        self.assertEqual(0, builder.handleTimeout.call_count)
+
+class TestBuilderWithTrial(TrialTestCase):
+
+    layer = TwistedLaunchpadZopelessLayer
+
+    def setUp(self):
+        super(TestBuilderWithTrial, self).setUp()
+        self.slave_helper = SlaveTestHelpers()
+        self.slave_helper.setUp()
+        self.addCleanup(self.slave_helper.cleanUp)
+        self.factory = LaunchpadObjectFactory()
+        login_as(ANONYMOUS)
+        self.addCleanup(logout)
+
+    def test_updateStatus_aborts_lost_and_broken_slave(self):
+        # A slave that's 'lost' should be aborted; when the slave is
+        # broken then abort() should also throw a fault.
+        slave = LostBuildingBrokenSlave()
+        lostbuilding_builder = MockBuilder(
+            'Lost Building Broken Slave', slave, behavior=CorruptBehavior())
+        d = lostbuilding_builder.updateStatus(QuietFakeLogger())
+        def check_slave_status(failure):
+            self.assertIn('abort', slave.call_log)
+            # 'Fault' comes from the LostBuildingBrokenSlave, this is
+            # just testing that the value is passed through.
+            self.assertIsInstance(failure.value, xmlrpclib.Fault)
+        return d.addBoth(check_slave_status)
+
+    def test_resumeSlaveHost_nonvirtual(self):
+        builder = self.factory.makeBuilder(virtualized=False)
+        d = builder.resumeSlaveHost()
+        return self.assertFailure(d, CannotResumeHost)
+
+    def test_resumeSlaveHost_no_vmhost(self):
+        builder = self.factory.makeBuilder(virtualized=True, vm_host=None)
+        d = builder.resumeSlaveHost()
+        return self.assertFailure(d, CannotResumeHost)
+
+    def test_resumeSlaveHost_success(self):
+        reset_config = """
+            [builddmaster]
+            vm_resume_command: /bin/echo -n parp"""
+        config.push('reset', reset_config)
+        self.addCleanup(config.pop, 'reset')
+
+        builder = self.factory.makeBuilder(virtualized=True, vm_host="pop")
+        d = builder.resumeSlaveHost()
+        def got_resume(output):
+            self.assertEqual(('parp', ''), output)
+        return d.addCallback(got_resume)
+
+    def test_resumeSlaveHost_command_failed(self):
+        reset_fail_config = """
+            [builddmaster]
+            vm_resume_command: /bin/false"""
+        config.push('reset fail', reset_fail_config)
+        self.addCleanup(config.pop, 'reset fail')
+        builder = self.factory.makeBuilder(virtualized=True, vm_host="pop")
+        d = builder.resumeSlaveHost()
+        return self.assertFailure(d, CannotResumeHost)
+
+    def test_handleTimeout_resume_failure(self):
+        reset_fail_config = """
+            [builddmaster]
+            vm_resume_command: /bin/false"""
+        config.push('reset fail', reset_fail_config)
+        self.addCleanup(config.pop, 'reset fail')
+        builder = self.factory.makeBuilder(virtualized=True, vm_host="pop")
+        builder.builderok = True
+        d = builder.handleTimeout(QuietFakeLogger(), 'blah')
+        return self.assertFailure(d, CannotResumeHost)
+
+    def _setupRecipeBuildAndBuilder(self):
+        # Helper function to make a builder capable of building a
+        # recipe, returning both.
+        processor = self.factory.makeProcessor(name="i386")
+        builder = self.factory.makeBuilder(
+            processor=processor, virtualized=True, vm_host="bladh")
+        builder.setSlaveForTesting(OkSlave())
+        distroseries = self.factory.makeDistroSeries()
+        das = self.factory.makeDistroArchSeries(
+            distroseries=distroseries, architecturetag="i386",
+            processorfamily=processor.family)
+        chroot = self.factory.makeLibraryFileAlias()
+        das.addOrUpdateChroot(chroot)
+        distroseries.nominatedarchindep = das
+        build = self.factory.makeSourcePackageRecipeBuild(
+            distroseries=distroseries)
+        return builder, build
+
+    def test_findAndStartJob_returns_candidate(self):
+        # findAndStartJob finds the next queued job using _findBuildCandidate.
+        # We don't care about the type of build at all.
+        builder, build = self._setupRecipeBuildAndBuilder()
+        candidate = build.queueBuild()
+        # _findBuildCandidate is tested elsewhere, we just make sure that
+        # findAndStartJob delegates to it.
+        removeSecurityProxy(builder)._findBuildCandidate = FakeMethod(
+            result=candidate)
+        d = builder.findAndStartJob()
+        return d.addCallback(self.assertEqual, candidate)
+
+    def test_findAndStartJob_starts_job(self):
+        # findAndStartJob finds the next queued job using _findBuildCandidate
+        # and then starts it.
+        # We don't care about the type of build at all.
+        builder, build = self._setupRecipeBuildAndBuilder()
+        candidate = build.queueBuild()
+        removeSecurityProxy(builder)._findBuildCandidate = FakeMethod(
+            result=candidate)
+        d = builder.findAndStartJob()
+        def check_build_started(candidate):
+            self.assertEqual(candidate.builder, builder)
+            self.assertEqual(BuildStatus.BUILDING, build.status)
+        return d.addCallback(check_build_started)
 
     def test_slave(self):
         # Builder.slave is a BuilderSlave that points at the actual Builder.
@@ -136,25 +239,147 @@
         builder = removeSecurityProxy(self.factory.makeBuilder())
         self.assertEqual(builder.url, builder.slave.url)
 
-
-class Test_rescueBuilderIfLost(TestCaseWithFactory):
-    """Tests for lp.buildmaster.model.builder.rescueBuilderIfLost."""
-
-    layer = LaunchpadZopelessLayer
-
     def test_recovery_of_aborted_slave(self):
         # If a slave is in the ABORTED state, rescueBuilderIfLost should
         # clean it if we don't think it's currently building anything.
         # See bug 463046.
         aborted_slave = AbortedSlave()
-        # The slave's clean() method is normally an XMLRPC call, so we
-        # can just stub it out and check that it got called.
-        aborted_slave.clean = FakeMethod()
         builder = MockBuilder("mock_builder", aborted_slave)
         builder.currentjob = None
-        builder.rescueIfLost()
-
-        self.assertEqual(1, aborted_slave.clean.call_count)
+        d = builder.rescueIfLost()
+        def check_slave_calls(ignored):
+            self.assertIn('clean', aborted_slave.call_log)
+        return d.addCallback(check_slave_calls)
+
+    def test_recover_ok_slave(self):
+        # An idle slave is not rescued.
+        slave = OkSlave()
+        builder = MockBuilder("mock_builder", slave, TrivialBehavior())
+        d = builder.rescueIfLost()
+        def check_slave_calls(ignored):
+            self.assertNotIn('abort', slave.call_log)
+            self.assertNotIn('clean', slave.call_log)
+        return d.addCallback(check_slave_calls)
+
+    def test_recover_waiting_slave_with_good_id(self):
+        # rescueIfLost does not attempt to abort or clean a builder that is
+        # WAITING.
+        waiting_slave = WaitingSlave()
+        builder = MockBuilder("mock_builder", waiting_slave, TrivialBehavior())
+        d = builder.rescueIfLost()
+        def check_slave_calls(ignored):
+            self.assertNotIn('abort', waiting_slave.call_log)
+            self.assertNotIn('clean', waiting_slave.call_log)
+        return d.addCallback(check_slave_calls)
+
+    def test_recover_waiting_slave_with_bad_id(self):
+        # If a slave is WAITING with a build for us to get, and the build
+        # cookie cannot be verified, which means we don't recognize the build,
+        # then rescueBuilderIfLost should attempt to clean it, so that the
+        # builder is reset for a new build, and the corrupt build is
+        # discarded.
+        waiting_slave = WaitingSlave()
+        builder = MockBuilder("mock_builder", waiting_slave, CorruptBehavior())
+        d = builder.rescueIfLost()
+        def check_slave_calls(ignored):
+            self.assertNotIn('abort', waiting_slave.call_log)
+            self.assertIn('clean', waiting_slave.call_log)
+        return d.addCallback(check_slave_calls)
+
+    def test_recover_building_slave_with_good_id(self):
+        # rescueIfLost does not attempt to abort or clean a builder that is
+        # BUILDING.
+        building_slave = BuildingSlave()
+        builder = MockBuilder("mock_builder", building_slave, TrivialBehavior())
+        d = builder.rescueIfLost()
+        def check_slave_calls(ignored):
+            self.assertNotIn('abort', building_slave.call_log)
+            self.assertNotIn('clean', building_slave.call_log)
+        return d.addCallback(check_slave_calls)
+
+    def test_recover_building_slave_with_bad_id(self):
+        # If a slave is BUILDING with a build id we don't recognize, then we
+        # abort the build, thus stopping it in its tracks.
+        building_slave = BuildingSlave()
+        builder = MockBuilder("mock_builder", building_slave, CorruptBehavior())
+        d = builder.rescueIfLost()
+        def check_slave_calls(ignored):
+            self.assertIn('abort', building_slave.call_log)
+            self.assertNotIn('clean', building_slave.call_log)
+        return d.addCallback(check_slave_calls)
+
+
+class TestBuilderSlaveStatus(TestBuilderWithTrial):
+
+    # Verify what IBuilder.slaveStatus returns with slaves in different
+    # states.
+
+    def assertStatus(self, slave, builder_status=None,
+                     build_status=None, logtail=False, filemap=None,
+                     dependencies=None):
+        builder = self.factory.makeBuilder()
+        builder.setSlaveForTesting(slave)
+        d = builder.slaveStatus()
+
+        def got_status(status_dict):
+            expected = {}
+            if builder_status is not None:
+                expected["builder_status"] = builder_status
+            if build_status is not None:
+                expected["build_status"] = build_status
+            if dependencies is not None:
+                expected["dependencies"] = dependencies
+
+            # We don't care so much about the content of the logtail,
+            # just that it's there.
+            if logtail:
+                tail = status_dict.pop("logtail")
+                self.assertIsInstance(tail, xmlrpclib.Binary)
+
+            self.assertEqual(expected, status_dict)
+
+        return d.addCallback(got_status)
+
+    def test_slaveStatus_idle_slave(self):
+        self.assertStatus(
+            OkSlave(), builder_status='BuilderStatus.IDLE')
+
+    def test_slaveStatus_building_slave(self):
+        self.assertStatus(
+            BuildingSlave(), builder_status='BuilderStatus.BUILDING',
+            logtail=True)
+
+    def test_slaveStatus_waiting_slave(self):
+        self.assertStatus(
+            WaitingSlave(), builder_status='BuilderStatus.WAITING',
+            build_status='BuildStatus.OK', filemap={})
+
+    def test_slaveStatus_aborting_slave(self):
+        self.assertStatus(
+            AbortingSlave(), builder_status='BuilderStatus.ABORTING')
+
+    def test_slaveStatus_aborted_slave(self):
+        self.assertStatus(
+            AbortedSlave(), builder_status='BuilderStatus.ABORTED')
+
+    def test_isAvailable_with_not_builderok(self):
+        # isAvailable() is a wrapper around slaveStatusSentence()
+        builder = self.factory.makeBuilder()
+        builder.builderok = False
+        d = builder.isAvailable()
+        return d.addCallback(self.assertFalse)
+
+    def test_isAvailable_with_slave_fault(self):
+        builder = self.factory.makeBuilder()
+        builder.setSlaveForTesting(BrokenSlave())
+        d = builder.isAvailable()
+        return d.addCallback(self.assertFalse)
+
+    def test_isAvailable_with_slave_idle(self):
+        builder = self.factory.makeBuilder()
+        builder.setSlaveForTesting(OkSlave())
+        d = builder.isAvailable()
+        return d.addCallback(self.assertTrue)
 
 
 class TestFindBuildCandidateBase(TestCaseWithFactory):
@@ -188,6 +413,49 @@
             builder.manual = False
 
 
+class TestFindBuildCandidateGeneralCases(TestFindBuildCandidateBase):
+    # Test usage of findBuildCandidate not specific to any archive type.
+
+    def test_findBuildCandidate_supersedes_builds(self):
+        # IBuilder._findBuildCandidate identifies if there are builds
+        # for superseded source package releases in the queue and marks
+        # the corresponding build record as SUPERSEDED.
+        archive = self.factory.makeArchive()
+        self.publisher.getPubSource(
+            sourcename="gedit", status=PackagePublishingStatus.PUBLISHED,
+            archive=archive).createMissingBuilds()
+        old_candidate = removeSecurityProxy(
+            self.frog_builder)._findBuildCandidate()
+
+        # The candidate starts off as NEEDSBUILD:
+        build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(
+            old_candidate)
+        self.assertEqual(BuildStatus.NEEDSBUILD, build.status)
+
+        # Now supersede the source package:
+        publication = build.current_source_publication
+        publication.status = PackagePublishingStatus.SUPERSEDED
+
+        # The candidate returned is now a different one:
+        new_candidate = removeSecurityProxy(
+            self.frog_builder)._findBuildCandidate()
+        self.assertNotEqual(new_candidate, old_candidate)
+
+        # And the old_candidate is superseded:
+        self.assertEqual(BuildStatus.SUPERSEDED, build.status)
+
+    def test_acquireBuildCandidate_marks_building(self):
+        # acquireBuildCandidate() should call _findBuildCandidate and
+        # mark the build as building.
+        archive = self.factory.makeArchive()
+        self.publisher.getPubSource(
+            sourcename="gedit", status=PackagePublishingStatus.PUBLISHED,
+            archive=archive).createMissingBuilds()
+        candidate = removeSecurityProxy(
+            self.frog_builder).acquireBuildCandidate()
+        self.assertEqual(JobStatus.RUNNING, candidate.job.status)
+
+
 class TestFindBuildCandidatePPAWithSingleBuilder(TestCaseWithFactory):
 
     layer = LaunchpadZopelessLayer
@@ -320,6 +588,16 @@
         build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(next_job)
         self.failUnlessEqual('joesppa', build.archive.name)
 
+    def test_findBuildCandidate_with_disabled_archive(self):
+        # Disabled archives should not be considered for dispatching
+        # builds.
+        disabled_job = removeSecurityProxy(self.builder4)._findBuildCandidate()
+        build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(
+            disabled_job)
+        build.archive.disable()
+        next_job = removeSecurityProxy(self.builder4)._findBuildCandidate()
+        self.assertNotEqual(disabled_job, next_job)
+
 
 class TestFindBuildCandidatePrivatePPA(TestFindBuildCandidatePPABase):
 
@@ -332,6 +610,14 @@
         build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(next_job)
         self.failUnlessEqual('joesppa', build.archive.name)
 
+        # If the source for the build is still pending, it won't be
+        # dispatched because the builder has to fetch the source files
+        # from the (password protected) repo area, not the librarian.
+        pub = build.current_source_publication
+        pub.status = PackagePublishingStatus.PENDING
+        candidate = removeSecurityProxy(self.builder4)._findBuildCandidate()
+        self.assertNotEqual(next_job.id, candidate.id)
+
 
 class TestFindBuildCandidateDistroArchive(TestFindBuildCandidateBase):
 
@@ -474,97 +760,48 @@
             self.builder.current_build_behavior, BinaryPackageBuildBehavior)
 
 
-class TestSlave(TestCase):
+class TestSlave(TrialTestCase):
     """
     Integration tests for BuilderSlave that verify how it works against a
     real slave server.
     """
 
+    layer = TwistedLayer
+
+    def setUp(self):
+        super(TestSlave, self).setUp()
+        self.slave_helper = SlaveTestHelpers()
+        self.slave_helper.setUp()
+        self.addCleanup(self.slave_helper.cleanUp)
+
     # XXX: JonathanLange 2010-09-20 bug=643521: There are also tests for
     # BuilderSlave in buildd-slave.txt and in other places. The tests here
     # ought to become the canonical tests for BuilderSlave vs running buildd
     # XML-RPC server interaction.
 
-    # The URL for the XML-RPC service set up by `BuilddSlaveTestSetup`.
-    TEST_URL = 'http://localhost:8221/rpc/'
-
-    def getServerSlave(self):
-        """Set up a test build slave server.
-
-        :return: A `BuilddSlaveTestSetup` object.
-        """
-        tachandler = BuilddSlaveTestSetup()
-        tachandler.setUp()
-        self.addCleanup(tachandler.tearDown)
-        def addLogFile(exc_info):
-            self.addDetail(
-                'xmlrpc-log-file',
-                Content(UTF8_TEXT, lambda: open(tachandler.logfile, 'r').read()))
-        self.addOnException(addLogFile)
-        return tachandler
-
-    def getClientSlave(self):
-        """Return a `BuilderSlave` for use in testing.
-
-        Points to a fixed URL that is also used by `BuilddSlaveTestSetup`.
-        """
-        return BuilderSlave.makeBlockingSlave(self.TEST_URL, 'vmhost')
-
-    def makeCacheFile(self, tachandler, filename):
-        """Make a cache file available on the remote slave.
-
-        :param tachandler: The TacTestSetup object used to start the remote
-            slave.
-        :param filename: The name of the file to create in the file cache
-            area.
-        """
-        path = os.path.join(tachandler.root, 'filecache', filename)
-        fd = open(path, 'w')
-        fd.write('something')
-        fd.close()
-        self.addCleanup(os.unlink, path)
-
-    def triggerGoodBuild(self, slave, build_id=None):
-        """Trigger a good build on 'slave'.
-
-        :param slave: A `BuilderSlave` instance to trigger the build on.
-        :param build_id: The build identifier. If not specified, defaults to
-            an arbitrary string.
-        :type build_id: str
-        :return: The build id returned by the slave.
-        """
-        if build_id is None:
-            build_id = self.getUniqueString()
-        tachandler = self.getServerSlave()
-        chroot_file = 'fake-chroot'
-        dsc_file = 'thing'
-        self.makeCacheFile(tachandler, chroot_file)
-        self.makeCacheFile(tachandler, dsc_file)
-        return slave.build(
-            build_id, 'debian', chroot_file, {'.dsc': dsc_file},
-            {'ogrecomponent': 'main'})
-
     # XXX 2010-10-06 Julian bug=655559
     # This is failing on buildbot but not locally; it's trying to abort
     # before the build has started.
     def disabled_test_abort(self):
-        slave = self.getClientSlave()
+        slave = self.slave_helper.getClientSlave()
         # We need to be in a BUILDING state before we can abort.
-        self.triggerGoodBuild(slave)
-        result = slave.abort()
-        self.assertEqual(result, BuilderStatus.ABORTING)
+        d = self.slave_helper.triggerGoodBuild(slave)
+        d.addCallback(lambda ignored: slave.abort())
+        d.addCallback(self.assertEqual, BuilderStatus.ABORTING)
+        return d
 
     def test_build(self):
         # Calling 'build' with an expected builder type, a good build id,
         # valid chroot & filemaps works and returns a BuilderStatus of
         # BUILDING.
         build_id = 'some-id'
-        slave = self.getClientSlave()
-        result = self.triggerGoodBuild(slave, build_id)
-        self.assertEqual([BuilderStatus.BUILDING, build_id], result)
+        slave = self.slave_helper.getClientSlave()
+        d = self.slave_helper.triggerGoodBuild(slave, build_id)
+        return d.addCallback(
+            self.assertEqual, [BuilderStatus.BUILDING, build_id])
 
     def test_clean(self):
-        slave = self.getClientSlave()
+        slave = self.slave_helper.getClientSlave()
         # XXX: JonathanLange 2010-09-21: Calling clean() on the slave requires
         # it to be in either the WAITING or ABORTED states, and both of these
         # states are very difficult to achieve in a test environment. For the
@@ -574,57 +811,248 @@
     def test_echo(self):
         # Calling 'echo' contacts the server which returns the arguments we
         # gave it.
-        self.getServerSlave()
-        slave = self.getClientSlave()
-        result = slave.echo('foo', 'bar', 42)
-        self.assertEqual(['foo', 'bar', 42], result)
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        d = slave.echo('foo', 'bar', 42)
+        return d.addCallback(self.assertEqual, ['foo', 'bar', 42])
 
     def test_info(self):
         # Calling 'info' gets some information about the slave.
-        self.getServerSlave()
-        slave = self.getClientSlave()
-        result = slave.info()
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        d = slave.info()
         # We're testing the hard-coded values, since the version is hard-coded
         # into the remote slave, the supported build managers are hard-coded
         # into the tac file for the remote slave and config is returned from
         # the configuration file.
-        self.assertEqual(
+        return d.addCallback(
+            self.assertEqual,
             ['1.0',
              'i386',
              ['sourcepackagerecipe',
-              'translation-templates', 'binarypackage', 'debian']],
-            result)
+              'translation-templates', 'binarypackage', 'debian']])
 
     def test_initial_status(self):
         # Calling 'status' returns the current status of the slave. The
         # initial status is IDLE.
-        self.getServerSlave()
-        slave = self.getClientSlave()
-        status = slave.status()
-        self.assertEqual([BuilderStatus.IDLE, ''], status)
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        d = slave.status()
+        return d.addCallback(self.assertEqual, [BuilderStatus.IDLE, ''])
 
     def test_status_after_build(self):
         # Calling 'status' returns the current status of the slave. After a
         # build has been triggered, the status is BUILDING.
-        slave = self.getClientSlave()
+        slave = self.slave_helper.getClientSlave()
         build_id = 'status-build-id'
-        self.triggerGoodBuild(slave, build_id)
-        status = slave.status()
-        self.assertEqual([BuilderStatus.BUILDING, build_id], status[:2])
-        [log_file] = status[2:]
-        self.assertIsInstance(log_file, xmlrpclib.Binary)
+        d = self.slave_helper.triggerGoodBuild(slave, build_id)
+        d.addCallback(lambda ignored: slave.status())
+        def check_status(status):
+            self.assertEqual([BuilderStatus.BUILDING, build_id], status[:2])
+            [log_file] = status[2:]
+            self.assertIsInstance(log_file, xmlrpclib.Binary)
+        return d.addCallback(check_status)
 
     def test_ensurepresent_not_there(self):
         # ensurepresent checks to see if a file is there.
-        self.getServerSlave()
-        slave = self.getClientSlave()
-        result = slave.ensurepresent('blahblah', None, None, None)
-        self.assertEqual([False, 'No URL'], result)
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        d = slave.ensurepresent('blahblah', None, None, None)
+        d.addCallback(self.assertEqual, [False, 'No URL'])
+        return d
 
     def test_ensurepresent_actually_there(self):
         # ensurepresent checks to see if a file is there.
-        tachandler = self.getServerSlave()
-        slave = self.getClientSlave()
-        self.makeCacheFile(tachandler, 'blahblah')
-        result = slave.ensurepresent('blahblah', None, None, None)
-        self.assertEqual([True, 'No URL'], result)
+        tachandler = self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        self.slave_helper.makeCacheFile(tachandler, 'blahblah')
+        d = slave.ensurepresent('blahblah', None, None, None)
+        d.addCallback(self.assertEqual, [True, 'No URL'])
+        return d
+
+    def test_sendFileToSlave_not_there(self):
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        d = slave.sendFileToSlave('blahblah', None, None, None)
+        return self.assertFailure(d, CannotFetchFile)
+
+    def test_sendFileToSlave_actually_there(self):
+        tachandler = self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        self.slave_helper.makeCacheFile(tachandler, 'blahblah')
+        d = slave.sendFileToSlave('blahblah', None, None, None)
+        def check_present(ignored):
+            d = slave.ensurepresent('blahblah', None, None, None)
+            return d.addCallback(self.assertEqual, [True, 'No URL'])
+        d.addCallback(check_present)
+        return d
+
+    def test_resumeHost_success(self):
+        # On a successful resume resume() fires the returned deferred
+        # callback with 'None'.
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+
+        # The configuration testing command-line.
+        self.assertEqual(
+            'echo %(vm_host)s', config.builddmaster.vm_resume_command)
+
+        # On success the response is None.
+        def check_resume_success(response):
+            out, err, code = response
+            self.assertEqual(os.EX_OK, code)
+            # XXX: JonathanLange 2010-09-23: We should instead pass the
+            # expected vm_host into the client slave. Not doing this now,
+            # since the SlaveHelper is being moved around.
+            self.assertEqual("%s\n" % slave._vm_host, out)
+        d = slave.resume()
+        d.addBoth(check_resume_success)
+        return d
+
+    def test_resumeHost_failure(self):
+        # On a failed resume, 'resumeHost' fires the returned deferred
+        # errorback with the `ProcessTerminated` failure.
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+
+        # Override the configuration command-line with one that will fail.
+        failed_config = """
+        [builddmaster]
+        vm_resume_command: test "%(vm_host)s = 'no-sir'"
+        """
+        config.push('failed_resume_command', failed_config)
+        self.addCleanup(config.pop, 'failed_resume_command')
+
+        # On failures, the response is a twisted `Failure` object containing
+        # a tuple.
+        def check_resume_failure(failure):
+            out, err, code = failure.value
+            # The process will exit with a return code of "1".
+            self.assertEqual(code, 1)
+        d = slave.resume()
+        d.addBoth(check_resume_failure)
+        return d
+
+    def test_resumeHost_timeout(self):
+        # On a resume timeouts, 'resumeHost' fires the returned deferred
+        # errorback with the `TimeoutError` failure.
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+
+        # Override the configuration command-line with one that will timeout.
+        timeout_config = """
+        [builddmaster]
+        vm_resume_command: sleep 5
+        socket_timeout: 1
+        """
+        config.push('timeout_resume_command', timeout_config)
+        self.addCleanup(config.pop, 'timeout_resume_command')
+
+        # On timeouts, the response is a twisted `Failure` object containing
+        # a `TimeoutError` error.
+        def check_resume_timeout(failure):
+            self.assertIsInstance(failure, Failure)
+            out, err, code = failure.value
+            self.assertEqual(code, signal.SIGKILL)
+        clock = Clock()
+        d = slave.resume(clock=clock)
+        # Move the clock beyond the socket_timeout but earlier than the
+        # sleep 5.  This stops the test having to wait for the timeout.
+        # Fast tests FTW!
+        clock.advance(2)
+        d.addBoth(check_resume_timeout)
+        return d
+
+
+class TestSlaveTimeouts(TrialTestCase):
+    # Testing that the methods that call callRemote() all time out
+    # as required.
+
+    layer = TwistedLayer
+
+    def setUp(self):
+        super(TestSlaveTimeouts, self).setUp()
+        self.slave_helper = SlaveTestHelpers()
+        self.slave_helper.setUp()
+        self.addCleanup(self.slave_helper.cleanUp)
+        self.clock = Clock()
+        self.proxy = DeadProxy("url")
+        self.slave = self.slave_helper.getClientSlave(
+            reactor=self.clock, proxy=self.proxy)
+
+    def assertCancelled(self, d):
+        self.clock.advance(config.builddmaster.socket_timeout + 1)
+        return self.assertFailure(d, CancelledError)
+
+    def test_timeout_abort(self):
+        return self.assertCancelled(self.slave.abort())
+
+    def test_timeout_clean(self):
+        return self.assertCancelled(self.slave.clean())
+
+    def test_timeout_echo(self):
+        return self.assertCancelled(self.slave.echo())
+
+    def test_timeout_info(self):
+        return self.assertCancelled(self.slave.info())
+
+    def test_timeout_status(self):
+        return self.assertCancelled(self.slave.status())
+
+    def test_timeout_ensurepresent(self):
+        return self.assertCancelled(
+            self.slave.ensurepresent(None, None, None, None))
+
+    def test_timeout_build(self):
+        return self.assertCancelled(
+            self.slave.build(None, None, None, None, None))
+
+
+class TestSlaveWithLibrarian(TrialTestCase):
+    """Tests that need more of Launchpad to run."""
+
+    layer = TwistedLaunchpadZopelessLayer
+
+    def setUp(self):
+        super(TestSlaveWithLibrarian, self).setUp()
+        self.slave_helper = SlaveTestHelpers()
+        self.slave_helper.setUp()
+        self.addCleanup(self.slave_helper.cleanUp)
+        self.factory = LaunchpadObjectFactory()
+        login_as(ANONYMOUS)
+        self.addCleanup(logout)
+
+    def test_ensurepresent_librarian(self):
+        # ensurepresent, when given an http URL for a file will download the
+        # file from that URL and report that the file is present, and it was
+        # downloaded.
+
+        # Use the Librarian because it's a "convenient" web server.
+        lf = self.factory.makeLibraryFileAlias(
+            'HelloWorld.txt', content="Hello World")
+        self.layer.txn.commit()
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        d = slave.ensurepresent(
+            lf.content.sha1, lf.http_url, "", "")
+        d.addCallback(self.assertEqual, [True, 'Download'])
+        return d
+
+    def test_retrieve_files_from_filecache(self):
+        # Files that are present on the slave can be downloaded with a
+        # filename made from the sha1 of the content underneath the
+        # 'filecache' directory.
+        content = "Hello World"
+        lf = self.factory.makeLibraryFileAlias(
+            'HelloWorld.txt', content=content)
+        self.layer.txn.commit()
+        expected_url = '%s/filecache/%s' % (
+            self.slave_helper.BASE_URL, lf.content.sha1)
+        self.slave_helper.getServerSlave()
+        slave = self.slave_helper.getClientSlave()
+        d = slave.ensurepresent(
+            lf.content.sha1, lf.http_url, "", "")
+        def check_file(ignored):
+            d = getPage(expected_url.encode('utf8'))
+            return d.addCallback(self.assertEqual, content)
+        return d.addCallback(check_file)

=== modified file 'lib/lp/buildmaster/tests/test_manager.py'
--- lib/lp/buildmaster/tests/test_manager.py	2010-09-28 11:05:14 +0000
+++ lib/lp/buildmaster/tests/test_manager.py	2010-10-27 02:13:03 +0000
@@ -6,6 +6,7 @@
 import os
 import signal
 import time
+import xmlrpclib
 
 import transaction
 
@@ -14,9 +15,7 @@
     reactor,
     task,
     )
-from twisted.internet.error import ConnectionClosed
 from twisted.internet.task import (
-    Clock,
     deferLater,
     )
 from twisted.python.failure import Failure
@@ -30,577 +29,45 @@
     ANONYMOUS,
     login,
     )
-from canonical.launchpad.scripts.logger import BufferLogger
+from canonical.launchpad.scripts.logger import (
+    QuietFakeLogger,
+    )
 from canonical.testing.layers import (
     LaunchpadScriptLayer,
-    LaunchpadZopelessLayer,
+    TwistedLaunchpadZopelessLayer,
     TwistedLayer,
+    ZopelessDatabaseLayer,
     )
 from lp.buildmaster.enums import BuildStatus
 from lp.buildmaster.interfaces.builder import IBuilderSet
 from lp.buildmaster.interfaces.buildqueue import IBuildQueueSet
 from lp.buildmaster.manager import (
-    BaseDispatchResult,
-    buildd_success_result_map,
+    assessFailureCounts,
     BuilddManager,
-    FailDispatchResult,
     NewBuildersScanner,
-    RecordingSlave,
-    ResetDispatchResult,
     SlaveScanner,
     )
+from lp.buildmaster.model.builder import Builder
 from lp.buildmaster.tests.harness import BuilddManagerTestSetup
-from lp.buildmaster.tests.mock_slaves import BuildingSlave
+from lp.buildmaster.tests.mock_slaves import (
+    BrokenSlave,
+    BuildingSlave,
+    OkSlave,
+    )
 from lp.registry.interfaces.distribution import IDistributionSet
 from lp.soyuz.interfaces.binarypackagebuild import IBinaryPackageBuildSet
-from lp.soyuz.tests.test_publishing import SoyuzTestPublisher
-from lp.testing import TestCase as LaunchpadTestCase
+from lp.testing import TestCaseWithFactory
 from lp.testing.factory import LaunchpadObjectFactory
 from lp.testing.fakemethod import FakeMethod
 from lp.testing.sampledata import BOB_THE_BUILDER_NAME
 
 
-class TestRecordingSlaves(TrialTestCase):
-    """Tests for the recording slave class."""
-    layer = TwistedLayer
-
-    def setUp(self):
-        """Setup a fresh `RecordingSlave` for tests."""
-        TrialTestCase.setUp(self)
-        self.slave = RecordingSlave(
-            'foo', 'http://foo:8221/rpc', 'foo.host')
-
-    def test_representation(self):
-        """`RecordingSlave` has a custom representation.
-
-        It encloses builder name and xmlrpc url for debug purposes.
-        """
-        self.assertEqual('<foo:http://foo:8221/rpc>', repr(self.slave))
-
-    def assert_ensurepresent(self, func):
-        """Helper function to test results from calling ensurepresent."""
-        self.assertEqual(
-            [True, 'Download'],
-            func('boing', 'bar', 'baz'))
-        self.assertEqual(
-            [('ensurepresent', ('boing', 'bar', 'baz'))],
-            self.slave.calls)
-
-    def test_ensurepresent(self):
-        """`RecordingSlave.ensurepresent` always succeeds.
-
-        It returns the expected succeed code and records the interaction
-        information for later use.
-        """
-        self.assert_ensurepresent(self.slave.ensurepresent)
-
-    def test_sendFileToSlave(self):
-        """RecordingSlave.sendFileToSlave always succeeeds.
-
-        It calls ensurepresent() and hence returns the same results.
-        """
-        self.assert_ensurepresent(self.slave.sendFileToSlave)
-
-    def test_build(self):
-        """`RecordingSlave.build` always succeeds.
-
-        It returns the expected succeed code and records the interaction
-        information for later use.
-        """
-        self.assertEqual(
-            ['BuilderStatus.BUILDING', 'boing'],
-            self.slave.build('boing', 'bar', 'baz'))
-        self.assertEqual(
-            [('build', ('boing', 'bar', 'baz'))],
-            self.slave.calls)
-
-    def test_resume(self):
-        """`RecordingSlave.resume` always returns successs."""
-        # Resume isn't requested in a just-instantiated RecordingSlave.
-        self.assertFalse(self.slave.resume_requested)
-
-        # When resume is called, it returns the success list and mark
-        # the slave for resuming.
-        self.assertEqual(['', '', os.EX_OK], self.slave.resume())
-        self.assertTrue(self.slave.resume_requested)
-
-    def test_resumeHost_success(self):
-        # On a successful resume resumeHost() fires the returned deferred
-        # callback with 'None'.
-
-        # The configuration testing command-line.
-        self.assertEqual(
-            'echo %(vm_host)s', config.builddmaster.vm_resume_command)
-
-        # On success the response is None.
-        def check_resume_success(response):
-            out, err, code = response
-            self.assertEqual(os.EX_OK, code)
-            self.assertEqual("%s\n" % self.slave.vm_host, out)
-        d = self.slave.resumeSlave()
-        d.addBoth(check_resume_success)
-        return d
-
-    def test_resumeHost_failure(self):
-        # On a failed resume, 'resumeHost' fires the returned deferred
-        # errorback with the `ProcessTerminated` failure.
-
-        # Override the configuration command-line with one that will fail.
-        failed_config = """
-        [builddmaster]
-        vm_resume_command: test "%(vm_host)s = 'no-sir'"
-        """
-        config.push('failed_resume_command', failed_config)
-        self.addCleanup(config.pop, 'failed_resume_command')
-
-        # On failures, the response is a twisted `Failure` object containing
-        # a tuple.
-        def check_resume_failure(failure):
-            out, err, code = failure.value
-            # The process will exit with a return code of "1".
-            self.assertEqual(code, 1)
-        d = self.slave.resumeSlave()
-        d.addBoth(check_resume_failure)
-        return d
-
-    def test_resumeHost_timeout(self):
-        # On a resume timeouts, 'resumeHost' fires the returned deferred
-        # errorback with the `TimeoutError` failure.
-
-        # Override the configuration command-line with one that will timeout.
-        timeout_config = """
-        [builddmaster]
-        vm_resume_command: sleep 5
-        socket_timeout: 1
-        """
-        config.push('timeout_resume_command', timeout_config)
-        self.addCleanup(config.pop, 'timeout_resume_command')
-
-        # On timeouts, the response is a twisted `Failure` object containing
-        # a `TimeoutError` error.
-        def check_resume_timeout(failure):
-            self.assertIsInstance(failure, Failure)
-            out, err, code = failure.value
-            self.assertEqual(code, signal.SIGKILL)
-        clock = Clock()
-        d = self.slave.resumeSlave(clock=clock)
-        # Move the clock beyond the socket_timeout but earlier than the
-        # sleep 5.  This stops the test having to wait for the timeout.
-        # Fast tests FTW!
-        clock.advance(2)
-        d.addBoth(check_resume_timeout)
-        return d
-
-
-class TestingXMLRPCProxy:
-    """This class mimics a twisted XMLRPC Proxy class."""
-
-    def __init__(self, failure_info=None):
-        self.calls = []
-        self.failure_info = failure_info
-        self.works = failure_info is None
-
-    def callRemote(self, *args):
-        self.calls.append(args)
-        if self.works:
-            result = buildd_success_result_map.get(args[0])
-        else:
-            result = 'boing'
-        return defer.succeed([result, self.failure_info])
-
-
-class TestingResetDispatchResult(ResetDispatchResult):
-    """Override the evaluation method to simply annotate the call."""
-
-    def __init__(self, slave, info=None):
-        ResetDispatchResult.__init__(self, slave, info)
-        self.processed = False
-
-    def __call__(self):
-        self.processed = True
-
-
-class TestingFailDispatchResult(FailDispatchResult):
-    """Override the evaluation method to simply annotate the call."""
-
-    def __init__(self, slave, info=None):
-        FailDispatchResult.__init__(self, slave, info)
-        self.processed = False
-
-    def __call__(self):
-        self.processed = True
-
-
-class TestingSlaveScanner(SlaveScanner):
-    """Override the dispatch result factories """
-
-    reset_result = TestingResetDispatchResult
-    fail_result = TestingFailDispatchResult
-
-
-class TestSlaveScanner(TrialTestCase):
-    """Tests for the actual build slave manager."""
-    layer = LaunchpadZopelessLayer
-
-    def setUp(self):
-        TrialTestCase.setUp(self)
-        self.manager = TestingSlaveScanner(
-            BOB_THE_BUILDER_NAME, BufferLogger())
-
-        self.fake_builder_url = 'http://bob.buildd:8221/'
-        self.fake_builder_host = 'bob.host'
-
-        # We will use an instrumented SlaveScanner instance for tests in
-        # this context.
-
-        # Stop cyclic execution and record the end of the cycle.
-        self.stopped = False
-
-        def testNextCycle():
-            self.stopped = True
-
-        self.manager.scheduleNextScanCycle = testNextCycle
-
-        # Return the testing Proxy version.
-        self.test_proxy = TestingXMLRPCProxy()
-
-        def testGetProxyForSlave(slave):
-            return self.test_proxy
-        self.manager._getProxyForSlave = testGetProxyForSlave
-
-        # Deactivate the 'scan' method.
-        def testScan():
-            pass
-        self.manager.scan = testScan
-
-        # Stop automatic collection of dispatching results.
-        def testslaveConversationEnded():
-            pass
-        self._realslaveConversationEnded = self.manager.slaveConversationEnded
-        self.manager.slaveConversationEnded = testslaveConversationEnded
-
-    def assertIsDispatchReset(self, result):
-        self.assertTrue(
-            isinstance(result, TestingResetDispatchResult),
-            'Dispatch failure did not result in a ResetBuildResult object')
-
-    def assertIsDispatchFail(self, result):
-        self.assertTrue(
-            isinstance(result, TestingFailDispatchResult),
-            'Dispatch failure did not result in a FailBuildResult object')
-
-    def test_checkResume(self):
-        """`SlaveScanner.checkResume` is chained after resume requests.
-
-        If the resume request succeed it returns None, otherwise it returns
-        a `ResetBuildResult` (the one in the test context) that will be
-        collect and evaluated later.
-
-        See `RecordingSlave.resumeHost` for more information about the resume
-        result contents.
-        """
-        slave = RecordingSlave('foo', 'http://foo.buildd:8221/', 'foo.host')
-
-        successful_response = ['', '', os.EX_OK]
-        result = self.manager.checkResume(successful_response, slave)
-        self.assertEqual(
-            None, result, 'Successful resume checks should return None')
-
-        failed_response = ['stdout', 'stderr', 1]
-        result = self.manager.checkResume(failed_response, slave)
-        self.assertIsDispatchReset(result)
-        self.assertEqual(
-            '<foo:http://foo.buildd:8221/> reset failure', repr(result))
-        self.assertEqual(
-            result.info, "stdout\nstderr")
-
-    def test_fail_to_resume_slave_resets_slave(self):
-        # If an attempt to resume and dispatch a slave fails, we reset the
-        # slave by calling self.reset_result(slave)().
-
-        reset_result_calls = []
-
-        class LoggingResetResult(BaseDispatchResult):
-            """A DispatchResult that logs calls to itself.
-
-            This *must* subclass BaseDispatchResult, otherwise finishCycle()
-            won't treat it like a dispatch result.
-            """
-
-            def __init__(self, slave, info=None):
-                self.slave = slave
-
-            def __call__(self):
-                reset_result_calls.append(self.slave)
-
-        # Make a failing slave that is requesting a resume.
-        slave = RecordingSlave('foo', 'http://foo.buildd:8221/', 'foo.host')
-        slave.resume_requested = True
-        slave.resumeSlave = lambda: deferLater(
-            reactor, 0, defer.fail, Failure(('out', 'err', 1)))
-
-        # Make the manager log the reset result calls.
-        self.manager.reset_result = LoggingResetResult
-
-        # We only care about this one slave. Reset the list of manager
-        # deferreds in case setUp did something unexpected.
-        self.manager._deferred_list = []
-
-        # Here, we're patching the slaveConversationEnded method so we can
-        # get an extra callback at the end of it, so we can
-        # verify that the reset_result was really called.
-        def _slaveConversationEnded():
-            d = self._realslaveConversationEnded()
-            return d.addCallback(
-                lambda ignored: self.assertEqual([slave], reset_result_calls))
-        self.manager.slaveConversationEnded = _slaveConversationEnded
-
-        self.manager.resumeAndDispatch(slave)
-
-    def test_failed_to_resume_slave_ready_for_reset(self):
-        # When a slave fails to resume, the manager has a Deferred in its
-        # Deferred list that is ready to fire with a ResetDispatchResult.
-
-        # Make a failing slave that is requesting a resume.
-        slave = RecordingSlave('foo', 'http://foo.buildd:8221/', 'foo.host')
-        slave.resume_requested = True
-        slave.resumeSlave = lambda: defer.fail(Failure(('out', 'err', 1)))
-
-        # We only care about this one slave. Reset the list of manager
-        # deferreds in case setUp did something unexpected.
-        self.manager._deferred_list = []
-        # Restore the slaveConversationEnded method. It's very relevant to
-        # this test.
-        self.manager.slaveConversationEnded = self._realslaveConversationEnded
-        self.manager.resumeAndDispatch(slave)
-        [d] = self.manager._deferred_list
-
-        # The Deferred for our failing slave should be ready to fire
-        # successfully with a ResetDispatchResult.
-        def check_result(result):
-            self.assertIsInstance(result, ResetDispatchResult)
-            self.assertEqual(slave, result.slave)
-            self.assertFalse(result.processed)
-        return d.addCallback(check_result)
-
-    def _setUpSlaveAndBuilder(self, builder_failure_count=None,
-                              job_failure_count=None):
-        # Helper function to set up a builder and its recording slave.
-        if builder_failure_count is None:
-            builder_failure_count = 0
-        if job_failure_count is None:
-            job_failure_count = 0
-        slave = RecordingSlave(
-            BOB_THE_BUILDER_NAME, self.fake_builder_url,
-            self.fake_builder_host)
-        bob_builder = getUtility(IBuilderSet)[slave.name]
-        bob_builder.failure_count = builder_failure_count
-        bob_builder.getCurrentBuildFarmJob().failure_count = job_failure_count
-        return slave, bob_builder
-
-    def test_checkDispatch_success(self):
-        # SlaveScanner.checkDispatch returns None for a successful
-        # dispatch.
-
-        """
-        If the dispatch request fails or a unknown method is given, it
-        returns a `FailDispatchResult` (in the test context) that will
-        be evaluated later.
-
-        Builders will be marked as failed if the following responses
-        categories are received.
-
-         * Legitimate slave failures: when the response is a list with 2
-           elements but the first element ('status') does not correspond to
-           the expected 'success' result. See `buildd_success_result_map`.
-
-         * Unexpected (code) failures: when the given 'method' is unknown
-           or the response isn't a 2-element list or Failure instance.
-
-        Communication failures (a twisted `Failure` instance) will simply
-        cause the builder to be reset, a `ResetDispatchResult` object is
-        returned. In other words, network failures are ignored in this
-        stage, broken builders will be identified and marked as so
-        during 'scan()' stage.
-
-        On success dispatching it returns None.
-        """
-        slave, bob_builder = self._setUpSlaveAndBuilder(
-            builder_failure_count=0, job_failure_count=0)
-
-        # Successful legitimate response, None is returned.
-        successful_response = [
-            buildd_success_result_map.get('ensurepresent'), 'cool builder']
-        result = self.manager.checkDispatch(
-            successful_response, 'ensurepresent', slave)
-        self.assertEqual(
-            None, result, 'Successful dispatch checks should return None')
-
-    def test_checkDispatch_first_fail(self):
-        # Failed legitimate response, results in FailDispatchResult and
-        # failure_count on the job and the builder are both incremented.
-        slave, bob_builder = self._setUpSlaveAndBuilder(
-            builder_failure_count=0, job_failure_count=0)
-
-        failed_response = [False, 'uncool builder']
-        result = self.manager.checkDispatch(
-            failed_response, 'ensurepresent', slave)
-        self.assertIsDispatchFail(result)
-        self.assertEqual(
-            repr(result),
-            '<bob:%s> failure (uncool builder)' % self.fake_builder_url)
-        self.assertEqual(1, bob_builder.failure_count)
-        self.assertEqual(
-            1, bob_builder.getCurrentBuildFarmJob().failure_count)
-
-    def test_checkDispatch_second_reset_fail_by_builder(self):
-        # Twisted Failure response, results in a `FailDispatchResult`.
-        slave, bob_builder = self._setUpSlaveAndBuilder(
-            builder_failure_count=1, job_failure_count=0)
-
-        twisted_failure = Failure(ConnectionClosed('Boom!'))
-        result = self.manager.checkDispatch(
-            twisted_failure, 'ensurepresent', slave)
-        self.assertIsDispatchFail(result)
-        self.assertEqual(
-            '<bob:%s> failure (None)' % self.fake_builder_url, repr(result))
-        self.assertEqual(2, bob_builder.failure_count)
-        self.assertEqual(
-            1, bob_builder.getCurrentBuildFarmJob().failure_count)
-
-    def test_checkDispatch_second_comms_fail_by_builder(self):
-        # Unexpected response, results in a `FailDispatchResult`.
-        slave, bob_builder = self._setUpSlaveAndBuilder(
-            builder_failure_count=1, job_failure_count=0)
-
-        unexpected_response = [1, 2, 3]
-        result = self.manager.checkDispatch(
-            unexpected_response, 'build', slave)
-        self.assertIsDispatchFail(result)
-        self.assertEqual(
-            '<bob:%s> failure '
-            '(Unexpected response: [1, 2, 3])' % self.fake_builder_url,
-            repr(result))
-        self.assertEqual(2, bob_builder.failure_count)
-        self.assertEqual(
-            1, bob_builder.getCurrentBuildFarmJob().failure_count)
-
-    def test_checkDispatch_second_comms_fail_by_job(self):
-        # Unknown method was given, results in a `FailDispatchResult`.
-        # This could be caused by a faulty job which would fail the job.
-        slave, bob_builder = self._setUpSlaveAndBuilder(
-            builder_failure_count=0, job_failure_count=1)
-
-        successful_response = [
-            buildd_success_result_map.get('ensurepresent'), 'cool builder']
-        result = self.manager.checkDispatch(
-            successful_response, 'unknown-method', slave)
-        self.assertIsDispatchFail(result)
-        self.assertEqual(
-            '<bob:%s> failure '
-            '(Unknown slave method: unknown-method)' % self.fake_builder_url,
-            repr(result))
-        self.assertEqual(1, bob_builder.failure_count)
-        self.assertEqual(
-            2, bob_builder.getCurrentBuildFarmJob().failure_count)
-
-    def test_initiateDispatch(self):
-        """Check `dispatchBuild` in various scenarios.
-
-        When there are no recording slaves (i.e. no build got dispatched
-        in scan()) it simply finishes the cycle.
-
-        When there is a recording slave with pending slave calls, they are
-        performed and if they all succeed the cycle is finished with no
-        errors.
-
-        On slave call failure the chain is stopped immediately and an
-        FailDispatchResult is collected while finishing the cycle.
-        """
-        def check_no_events(results):
-            errors = [
-                r for s, r in results if isinstance(r, BaseDispatchResult)]
-            self.assertEqual(0, len(errors))
-
-        def check_events(results):
-            [error] = [r for s, r in results if r is not None]
-            self.assertEqual(
-                '<bob:%s> failure (very broken slave)'
-                    % self.fake_builder_url,
-                repr(error))
-            self.assertTrue(error.processed)
-
-        def _wait_on_deferreds_then_check_no_events():
-            dl = self._realslaveConversationEnded()
-            dl.addCallback(check_no_events)
-
-        def _wait_on_deferreds_then_check_events():
-            dl = self._realslaveConversationEnded()
-            dl.addCallback(check_events)
-
-        # A functional slave charged with some interactions.
-        slave = RecordingSlave(
-            BOB_THE_BUILDER_NAME, self.fake_builder_url,
-            self.fake_builder_host)
-        slave.ensurepresent('arg1', 'arg2', 'arg3')
-        slave.build('arg1', 'arg2', 'arg3')
-
-        # If the previous step (resuming) has failed nothing gets dispatched.
-        reset_result = ResetDispatchResult(slave)
-        result = self.manager.initiateDispatch(reset_result, slave)
-        self.assertTrue(result is reset_result)
-        self.assertFalse(slave.resume_requested)
-        self.assertEqual(0, len(self.manager._deferred_list))
-
-        # Operation with the default (funcional slave), no resets or
-        # failures results are triggered.
-        slave.resume()
-        result = self.manager.initiateDispatch(None, slave)
-        self.assertEqual(None, result)
-        self.assertTrue(slave.resume_requested)
-        self.assertEqual(
-            [('ensurepresent', 'arg1', 'arg2', 'arg3'),
-             ('build', 'arg1', 'arg2', 'arg3')],
-            self.test_proxy.calls)
-        self.assertEqual(2, len(self.manager._deferred_list))
-
-        # Monkey patch the slaveConversationEnded method so we can chain a
-        # callback to check the end of the result chain.
-        self.manager.slaveConversationEnded = \
-            _wait_on_deferreds_then_check_no_events
-        events = self.manager.slaveConversationEnded()
-
-        # Create a broken slave and insert interaction that will
-        # cause the builder to be marked as fail.
-        self.test_proxy = TestingXMLRPCProxy('very broken slave')
-        slave = RecordingSlave(
-            BOB_THE_BUILDER_NAME, self.fake_builder_url,
-            self.fake_builder_host)
-        slave.ensurepresent('arg1', 'arg2', 'arg3')
-        slave.build('arg1', 'arg2', 'arg3')
-
-        result = self.manager.initiateDispatch(None, slave)
-        self.assertEqual(None, result)
-        self.assertEqual(3, len(self.manager._deferred_list))
-        self.assertEqual(
-            [('ensurepresent', 'arg1', 'arg2', 'arg3')],
-            self.test_proxy.calls)
-
-        # Monkey patch the slaveConversationEnded method so we can chain a
-        # callback to check the end of the result chain.
-        self.manager.slaveConversationEnded = \
-            _wait_on_deferreds_then_check_events
-        events = self.manager.slaveConversationEnded()
-
-        return events
-
-
 class TestSlaveScannerScan(TrialTestCase):
     """Tests `SlaveScanner.scan` method.
 
     This method uses the old framework for scanning and dispatching builds.
     """
-    layer = LaunchpadZopelessLayer
+    layer = TwistedLaunchpadZopelessLayer
 
     def setUp(self):
         """Setup TwistedLayer, TrialTestCase and BuilddSlaveTest.
@@ -608,19 +75,18 @@
         Also adjust the sampledata in a way a build can be dispatched to
         'bob' builder.
         """
+        from lp.soyuz.tests.test_publishing import SoyuzTestPublisher
         TwistedLayer.testSetUp()
         TrialTestCase.setUp(self)
         self.slave = BuilddSlaveTestSetup()
         self.slave.setUp()
 
         # Creating the required chroots needed for dispatching.
-        login('foo.bar@xxxxxxxxxxxxx')
         test_publisher = SoyuzTestPublisher()
         ubuntu = getUtility(IDistributionSet).getByName('ubuntu')
         hoary = ubuntu.getSeries('hoary')
         test_publisher.setUpDefaultDistroSeries(hoary)
         test_publisher.addFakeChroots()
-        login(ANONYMOUS)
 
     def tearDown(self):
         self.slave.tearDown()
@@ -628,8 +94,7 @@
         TwistedLayer.testTearDown()
 
     def _resetBuilder(self, builder):
-        """Reset the given builder and it's job."""
-        login('foo.bar@xxxxxxxxxxxxx')
+        """Reset the given builder and its job."""
 
         builder.builderok = True
         job = builder.currentjob
@@ -637,7 +102,6 @@
             job.reset()
 
         transaction.commit()
-        login(ANONYMOUS)
 
     def assertBuildingJob(self, job, builder, logtail=None):
         """Assert the given job is building on the given builder."""
@@ -653,55 +117,25 @@
         self.assertEqual(build.status, BuildStatus.BUILDING)
         self.assertEqual(job.logtail, logtail)
 
-    def _getManager(self):
+    def _getScanner(self, builder_name=None):
         """Instantiate a SlaveScanner object.
 
         Replace its default logging handler by a testing version.
         """
-        manager = SlaveScanner(BOB_THE_BUILDER_NAME, BufferLogger())
-        manager.logger.name = 'slave-scanner'
+        if builder_name is None:
+            builder_name = BOB_THE_BUILDER_NAME
+        scanner = SlaveScanner(builder_name, QuietFakeLogger())
+        scanner.logger.name = 'slave-scanner'
 
-        return manager
+        return scanner
 
     def _checkDispatch(self, slave, builder):
-        """`SlaveScanner.scan` returns a `RecordingSlave`.
-
-        The single slave returned should match the given builder and
-        contain interactions that should be performed asynchronously for
-        properly dispatching the sampledata job.
-        """
-        self.assertFalse(
-            slave is None, "Unexpected recording_slaves.")
-
-        self.assertEqual(slave.name, builder.name)
-        self.assertEqual(slave.url, builder.url)
-        self.assertEqual(slave.vm_host, builder.vm_host)
+        # SlaveScanner.scan returns a slave when a dispatch was
+        # successful.  We also check that the builder has a job on it.
+
+        self.assertTrue(slave is not None, "Expected a slave.")
         self.assertEqual(0, builder.failure_count)
-
-        self.assertEqual(
-            [('ensurepresent',
-              ('0feca720e2c29dafb2c900713ba560e03b758711',
-               'http://localhost:58000/93/fake_chroot.tar.gz',
-               '', '')),
-             ('ensurepresent',
-              ('4e3961baf4f56fdbc95d0dd47f3c5bc275da8a33',
-               'http://localhost:58000/43/alsa-utils_1.0.9a-4ubuntu1.dsc',
-               '', '')),
-             ('build',
-              ('6358a89e2215e19b02bf91e2e4d009640fae5cf8',
-               'binarypackage', '0feca720e2c29dafb2c900713ba560e03b758711',
-               {'alsa-utils_1.0.9a-4ubuntu1.dsc':
-                '4e3961baf4f56fdbc95d0dd47f3c5bc275da8a33'},
-               {'arch_indep': True,
-                'arch_tag': 'i386',
-                'archive_private': False,
-                'archive_purpose': 'PRIMARY',
-                'archives':
-                ['deb http://ftpmaster.internal/ubuntu hoary main'],
-                'build_debug_symbols': False,
-                'ogrecomponent': 'main',
-                'suite': u'hoary'}))],
-            slave.calls, "Job was not properly dispatched.")
+        self.assertTrue(builder.currentjob is not None)
 
     def testScanDispatchForResetBuilder(self):
         # A job gets dispatched to the sampledata builder after it's reset.
@@ -709,26 +143,27 @@
         # Reset sampledata builder.
         builder = getUtility(IBuilderSet)[BOB_THE_BUILDER_NAME]
         self._resetBuilder(builder)
+        builder.setSlaveForTesting(OkSlave())
         # Set this to 1 here so that _checkDispatch can make sure it's
         # reset to 0 after a successful dispatch.
         builder.failure_count = 1
 
         # Run 'scan' and check its result.
-        LaunchpadZopelessLayer.switchDbUser(config.builddmaster.dbuser)
-        manager = self._getManager()
-        d = defer.maybeDeferred(manager.scan)
+        self.layer.txn.commit()
+        self.layer.switchDbUser(config.builddmaster.dbuser)
+        scanner = self._getScanner()
+        d = defer.maybeDeferred(scanner.scan)
         d.addCallback(self._checkDispatch, builder)
         return d
 
-    def _checkNoDispatch(self, recording_slave, builder):
+    def _checkNoDispatch(self, slave, builder):
         """Assert that no dispatch has occurred.
 
-        'recording_slave' is None, so no interations would be passed
+        'slave' is None, so no interactions would be passed
         to the asynchonous dispatcher and the builder remained active
         and IDLE.
         """
-        self.assertTrue(
-            recording_slave is None, "Unexpected recording_slave.")
+        self.assertTrue(slave is None, "Unexpected slave.")
 
         builder = getUtility(IBuilderSet).get(builder.id)
         self.assertTrue(builder.builderok)
@@ -753,9 +188,9 @@
         login(ANONYMOUS)
 
         # Run 'scan' and check its result.
-        LaunchpadZopelessLayer.switchDbUser(config.builddmaster.dbuser)
-        manager = self._getManager()
-        d = defer.maybeDeferred(manager.scan)
+        self.layer.switchDbUser(config.builddmaster.dbuser)
+        scanner = self._getScanner()
+        d = defer.maybeDeferred(scanner.singleCycle)
         d.addCallback(self._checkNoDispatch, builder)
         return d
 
@@ -793,9 +228,9 @@
         login(ANONYMOUS)
 
         # Run 'scan' and check its result.
-        LaunchpadZopelessLayer.switchDbUser(config.builddmaster.dbuser)
-        manager = self._getManager()
-        d = defer.maybeDeferred(manager.scan)
+        self.layer.switchDbUser(config.builddmaster.dbuser)
+        scanner = self._getScanner()
+        d = defer.maybeDeferred(scanner.scan)
         d.addCallback(self._checkJobRescued, builder, job)
         return d
 
@@ -814,8 +249,6 @@
         self.assertBuildingJob(job, builder, logtail='This is a build log')
 
     def testScanUpdatesBuildingJobs(self):
-        # The job assigned to a broken builder is rescued.
-
         # Enable sampledata builder attached to an appropriate testing
         # slave. It will respond as if it was building the sampledata job.
         builder = getUtility(IBuilderSet)[BOB_THE_BUILDER_NAME]
@@ -830,188 +263,174 @@
         self.assertBuildingJob(job, builder)
 
         # Run 'scan' and check its result.
-        LaunchpadZopelessLayer.switchDbUser(config.builddmaster.dbuser)
-        manager = self._getManager()
-        d = defer.maybeDeferred(manager.scan)
+        self.layer.switchDbUser(config.builddmaster.dbuser)
+        scanner = self._getScanner()
+        d = defer.maybeDeferred(scanner.scan)
         d.addCallback(self._checkJobUpdated, builder, job)
         return d
 
-    def test_scan_assesses_failure_exceptions(self):
+    def test_scan_with_nothing_to_dispatch(self):
+        factory = LaunchpadObjectFactory()
+        builder = factory.makeBuilder()
+        builder.setSlaveForTesting(OkSlave())
+        scanner = self._getScanner(builder_name=builder.name)
+        d = scanner.scan()
+        return d.addCallback(self._checkNoDispatch, builder)
+
+    def test_scan_with_manual_builder(self):
+        # Reset sampledata builder.
+        builder = getUtility(IBuilderSet)[BOB_THE_BUILDER_NAME]
+        self._resetBuilder(builder)
+        builder.setSlaveForTesting(OkSlave())
+        builder.manual = True
+        scanner = self._getScanner()
+        d = scanner.scan()
+        d.addCallback(self._checkNoDispatch, builder)
+        return d
+
+    def test_scan_with_not_ok_builder(self):
+        # Reset sampledata builder.
+        builder = getUtility(IBuilderSet)[BOB_THE_BUILDER_NAME]
+        self._resetBuilder(builder)
+        builder.setSlaveForTesting(OkSlave())
+        builder.builderok = False
+        scanner = self._getScanner()
+        d = scanner.scan()
+        # Because the builder is not ok, we can't use _checkNoDispatch.
+        d.addCallback(
+            lambda ignored: self.assertIdentical(None, builder.currentjob))
+        return d
+
+    def test_scan_of_broken_slave(self):
+        builder = getUtility(IBuilderSet)[BOB_THE_BUILDER_NAME]
+        self._resetBuilder(builder)
+        builder.setSlaveForTesting(BrokenSlave())
+        builder.failure_count = 0
+        scanner = self._getScanner(builder_name=builder.name)
+        d = scanner.scan()
+        return self.assertFailure(d, xmlrpclib.Fault)
+
+    def _assertFailureCounting(self, builder_count, job_count,
+                               expected_builder_count, expected_job_count):
         # If scan() fails with an exception, failure_counts should be
-        # incremented and tested.
+        # incremented.  What we do with the results of the failure
+        # counts is tested below separately, this test just makes sure that
+        # scan() is setting the counts.
         def failing_scan():
-            raise Exception("fake exception")
-        manager = self._getManager()
-        manager.scan = failing_scan
-        manager.scheduleNextScanCycle = FakeMethod()
+            return defer.fail(Exception("fake exception"))
+        scanner = self._getScanner()
+        scanner.scan = failing_scan
         from lp.buildmaster import manager as manager_module
         self.patch(manager_module, 'assessFailureCounts', FakeMethod())
-        builder = getUtility(IBuilderSet)[manager.builder_name]
-
-        # Failure counts start at zero.
-        self.assertEqual(0, builder.failure_count)
-        self.assertEqual(
-            0, builder.currentjob.specific_job.build.failure_count)
-
-        # startCycle() calls scan() which is our fake one that throws an
+        builder = getUtility(IBuilderSet)[scanner.builder_name]
+
+        builder.failure_count = builder_count
+        builder.currentjob.specific_job.build.failure_count = job_count
+        # _scanFailed() aborts the transaction, so make sure our existing
+        # failure counts are persisted.
+        self.layer.txn.commit()
+
+        # singleCycle() calls scan() which is our fake one that throws an
         # exception.
-        manager.startCycle()
+        d = scanner.singleCycle()
 
         # Failure counts should be updated, and the assessment method
-        # should have been called.
-        self.assertEqual(1, builder.failure_count)
-        self.assertEqual(
-            1, builder.currentjob.specific_job.build.failure_count)
-
-        self.assertEqual(
-            1, manager_module.assessFailureCounts.call_count)
-
-
-class TestDispatchResult(LaunchpadTestCase):
-    """Tests `BaseDispatchResult` variations.
-
-    Variations of `BaseDispatchResult` when evaluated update the database
-    information according to their purpose.
-    """
-
-    layer = LaunchpadZopelessLayer
-
-    def _getBuilder(self, name):
-        """Return a fixed `IBuilder` instance from the sampledata.
-
-        Ensure it's active (builderok=True) and it has a in-progress job.
-        """
-        login('foo.bar@xxxxxxxxxxxxx')
-
-        builder = getUtility(IBuilderSet)[name]
-        builder.builderok = True
-
-        job = builder.currentjob
-        build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(job)
-        self.assertEqual(
-            'i386 build of mozilla-firefox 0.9 in ubuntu hoary RELEASE',
-            build.title)
-
-        self.assertEqual('BUILDING', build.status.name)
-        self.assertNotEqual(None, job.builder)
-        self.assertNotEqual(None, job.date_started)
-        self.assertNotEqual(None, job.logtail)
-
-        transaction.commit()
-
-        return builder, job.id
-
-    def assertBuildqueueIsClean(self, buildqueue):
-        # Check that the buildqueue is reset.
-        self.assertEqual(None, buildqueue.builder)
-        self.assertEqual(None, buildqueue.date_started)
-        self.assertEqual(None, buildqueue.logtail)
-
-    def assertBuilderIsClean(self, builder):
-        # Check that the builder is ready for a new build.
-        self.assertTrue(builder.builderok)
-        self.assertIs(None, builder.failnotes)
-        self.assertIs(None, builder.currentjob)
-
-    def testResetDispatchResult(self):
-        # Test that `ResetDispatchResult` resets the builder and job.
-        builder, job_id = self._getBuilder(BOB_THE_BUILDER_NAME)
-        buildqueue_id = builder.currentjob.id
-        builder.builderok = True
-        builder.failure_count = 1
-
-        # Setup a interaction to satisfy 'write_transaction' decorator.
-        login(ANONYMOUS)
-        slave = RecordingSlave(builder.name, builder.url, builder.vm_host)
-        result = ResetDispatchResult(slave)
-        result()
-
-        buildqueue = getUtility(IBuildQueueSet).get(buildqueue_id)
-        self.assertBuildqueueIsClean(buildqueue)
-
-        # XXX Julian
-        # Disabled test until bug 586362 is fixed.
-        #self.assertFalse(builder.builderok)
-        self.assertBuilderIsClean(builder)
-
-    def testFailDispatchResult(self):
-        # Test that `FailDispatchResult` calls assessFailureCounts() so
-        # that we know the builders and jobs are failed as necessary
-        # when a FailDispatchResult is called at the end of the dispatch
-        # chain.
-        builder, job_id = self._getBuilder(BOB_THE_BUILDER_NAME)
-
-        # Setup a interaction to satisfy 'write_transaction' decorator.
-        login(ANONYMOUS)
-        slave = RecordingSlave(builder.name, builder.url, builder.vm_host)
-        result = FailDispatchResult(slave, 'does not work!')
-        result.assessFailureCounts = FakeMethod()
-        self.assertEqual(0, result.assessFailureCounts.call_count)
-        result()
-        self.assertEqual(1, result.assessFailureCounts.call_count)
-
-    def _setup_failing_dispatch_result(self):
-        # assessFailureCounts should fail jobs or builders depending on
-        # whether it sees the failure_counts on each increasing.
-        builder, job_id = self._getBuilder(BOB_THE_BUILDER_NAME)
-        slave = RecordingSlave(builder.name, builder.url, builder.vm_host)
-        result = FailDispatchResult(slave, 'does not work!')
-        return builder, result
-
-    def test_assessFailureCounts_equal_failures(self):
-        # Basic case where the failure counts are equal and the job is
-        # reset to try again & the builder is not failed.
-        builder, result = self._setup_failing_dispatch_result()
-        buildqueue = builder.currentjob
-        build = buildqueue.specific_job.build
-        builder.failure_count = 2
-        build.failure_count = 2
-        result.assessFailureCounts()
-
-        self.assertBuilderIsClean(builder)
-        self.assertEqual('NEEDSBUILD', build.status.name)
-        self.assertBuildqueueIsClean(buildqueue)
-
-    def test_assessFailureCounts_job_failed(self):
-        # Case where the job has failed more than the builder.
-        builder, result = self._setup_failing_dispatch_result()
-        buildqueue = builder.currentjob
-        build = buildqueue.specific_job.build
-        build.failure_count = 2
-        builder.failure_count = 1
-        result.assessFailureCounts()
-
-        self.assertBuilderIsClean(builder)
-        self.assertEqual('FAILEDTOBUILD', build.status.name)
-        # The buildqueue should have been removed entirely.
-        self.assertEqual(
-            None, getUtility(IBuildQueueSet).getByBuilder(builder),
-            "Buildqueue was not removed when it should be.")
-
-    def test_assessFailureCounts_builder_failed(self):
-        # Case where the builder has failed more than the job.
-        builder, result = self._setup_failing_dispatch_result()
-        buildqueue = builder.currentjob
-        build = buildqueue.specific_job.build
-        build.failure_count = 2
-        builder.failure_count = 3
-        result.assessFailureCounts()
-
-        self.assertFalse(builder.builderok)
-        self.assertEqual('does not work!', builder.failnotes)
-        self.assertTrue(builder.currentjob is None)
-        self.assertEqual('NEEDSBUILD', build.status.name)
-        self.assertBuildqueueIsClean(buildqueue)
+        # should have been called.  The actual behaviour is tested below
+        # in TestFailureAssessments.
+        def got_scan(ignored):
+            self.assertEqual(expected_builder_count, builder.failure_count)
+            self.assertEqual(
+                expected_job_count,
+                builder.currentjob.specific_job.build.failure_count)
+            self.assertEqual(
+                1, manager_module.assessFailureCounts.call_count)
+
+        return d.addCallback(got_scan)
+
+    def test_scan_first_fail(self):
+        # The first failure of a job should result in the failure_count
+        # on the job and the builder both being incremented.
+        self._assertFailureCounting(
+            builder_count=0, job_count=0, expected_builder_count=1,
+            expected_job_count=1)
+
+    def test_scan_second_builder_fail(self):
+        # A second scan failure on a builder that has already failed once
+        # increments its failure_count again and starts counting for the job.
+        self._assertFailureCounting(
+            builder_count=1, job_count=0, expected_builder_count=2,
+            expected_job_count=1)
+
+    def test_scan_second_job_fail(self):
+        # A second scan failure for a job that has already failed once
+        # increments its failure_count again and starts counting for the
+        self._assertFailureCounting(
+            builder_count=0, job_count=1, expected_builder_count=1,
+            expected_job_count=2)
+
+    def test_scanFailed_handles_lack_of_a_job_on_the_builder(self):
+        def failing_scan():
+            return defer.fail(Exception("fake exception"))
+        scanner = self._getScanner()
+        scanner.scan = failing_scan
+        builder = getUtility(IBuilderSet)[scanner.builder_name]
+        builder.failure_count = Builder.FAILURE_THRESHOLD
+        builder.currentjob.reset()
+        self.layer.txn.commit()
+
+        d = scanner.singleCycle()
+
+        def scan_finished(ignored):
+            self.assertFalse(builder.builderok)
+
+        return d.addCallback(scan_finished)
+
+    def test_fail_to_resume_slave_resets_job(self):
+        # If an attempt to resume and dispatch a slave fails, it should
+        # reset the job via job.reset()
+
+        # Make a slave with a failing resume() method.
+        slave = OkSlave()
+        slave.resume = lambda: deferLater(
+            reactor, 0, defer.fail, Failure(('out', 'err', 1)))
+
+        # Reset sampledata builder.
+        builder = removeSecurityProxy(
+            getUtility(IBuilderSet)[BOB_THE_BUILDER_NAME])
+        self._resetBuilder(builder)
+        self.assertEqual(0, builder.failure_count)
+        builder.setSlaveForTesting(slave)
+        builder.vm_host = "fake_vm_host"
+
+        scanner = self._getScanner()
+
+        # Get the next job that will be dispatched.
+        job = removeSecurityProxy(builder._findBuildCandidate())
+        job.virtualized = True
+        builder.virtualized = True
+        d = scanner.singleCycle()
+
+        def check(ignored):
+            # The failure_count will have been incremented on the
+            # builder, we can check that to see that a dispatch attempt
+            # did indeed occur.
+            self.assertEqual(1, builder.failure_count)
+            # There should also be no builder set on the job.
+            self.assertTrue(job.builder is None)
+            build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(job)
+            self.assertEqual(build.status, BuildStatus.NEEDSBUILD)
+
+        return d.addCallback(check)
 
 
 class TestBuilddManager(TrialTestCase):
 
-    layer = LaunchpadZopelessLayer
+    layer = TwistedLaunchpadZopelessLayer
 
     def _stub_out_scheduleNextScanCycle(self):
         # stub out the code that adds a callLater, so that later tests
         # don't get surprises.
-        self.patch(SlaveScanner, 'scheduleNextScanCycle', FakeMethod())
+        self.patch(SlaveScanner, 'startCycle', FakeMethod())
 
     def test_addScanForBuilders(self):
         # Test that addScanForBuilders generates NewBuildersScanner objects.
@@ -1040,10 +459,62 @@
         self.assertNotEqual(0, manager.new_builders_scanner.scan.call_count)
 
 
+class TestFailureAssessments(TestCaseWithFactory):
+
+    layer = ZopelessDatabaseLayer
+
+    def setUp(self):
+        TestCaseWithFactory.setUp(self)
+        self.builder = self.factory.makeBuilder()
+        self.build = self.factory.makeSourcePackageRecipeBuild()
+        self.buildqueue = self.build.queueBuild()
+        self.buildqueue.markAsBuilding(self.builder)
+
+    def test_equal_failures_reset_job(self):
+        self.builder.gotFailure()
+        self.builder.getCurrentBuildFarmJob().gotFailure()
+
+        assessFailureCounts(self.builder, "failnotes")
+        self.assertIs(None, self.builder.currentjob)
+        self.assertEqual(self.build.status, BuildStatus.NEEDSBUILD)
+
+    def test_job_failing_more_than_builder_fails_job(self):
+        self.builder.getCurrentBuildFarmJob().gotFailure()
+
+        assessFailureCounts(self.builder, "failnotes")
+        self.assertIs(None, self.builder.currentjob)
+        self.assertEqual(self.build.status, BuildStatus.FAILEDTOBUILD)
+
+    def test_builder_failing_more_than_job_but_under_fail_threshold(self):
+        self.builder.failure_count = Builder.FAILURE_THRESHOLD - 1
+
+        assessFailureCounts(self.builder, "failnotes")
+        self.assertIs(None, self.builder.currentjob)
+        self.assertEqual(self.build.status, BuildStatus.NEEDSBUILD)
+        self.assertTrue(self.builder.builderok)
+
+    def test_builder_failing_more_than_job_but_over_fail_threshold(self):
+        self.builder.failure_count = Builder.FAILURE_THRESHOLD
+
+        assessFailureCounts(self.builder, "failnotes")
+        self.assertIs(None, self.builder.currentjob)
+        self.assertEqual(self.build.status, BuildStatus.NEEDSBUILD)
+        self.assertFalse(self.builder.builderok)
+        self.assertEqual("failnotes", self.builder.failnotes)
+
+    def test_builder_failing_with_no_attached_job(self):
+        self.buildqueue.reset()
+        self.builder.failure_count = Builder.FAILURE_THRESHOLD
+
+        assessFailureCounts(self.builder, "failnotes")
+        self.assertFalse(self.builder.builderok)
+        self.assertEqual("failnotes", self.builder.failnotes)
+
+
 class TestNewBuilders(TrialTestCase):
     """Test detecting of new builders."""
 
-    layer = LaunchpadZopelessLayer
+    layer = TwistedLaunchpadZopelessLayer
 
     def _getScanner(self, manager=None, clock=None):
         return NewBuildersScanner(manager=manager, clock=clock)
@@ -1084,11 +555,8 @@
             new_builders, builder_scanner.checkForNewBuilders())
 
     def test_scan(self):
-        # See if scan detects new builders and schedules the next scan.
+        # See if scan detects new builders.
 
-        # stub out the addScanForBuilders and scheduleScan methods since
-        # they use callLater; we only want to assert that they get
-        # called.
         def fake_checkForNewBuilders():
             return "new_builders"
 
@@ -1104,9 +572,6 @@
         builder_scanner.scan()
         advance = NewBuildersScanner.SCAN_INTERVAL + 1
         clock.advance(advance)
-        self.assertNotEqual(
-            0, builder_scanner.scheduleScan.call_count,
-            "scheduleScan did not get called")
 
 
 def is_file_growing(filepath, poll_interval=1, poll_repeat=10):
@@ -1147,7 +612,7 @@
     return False
 
 
-class TestBuilddManagerScript(LaunchpadTestCase):
+class TestBuilddManagerScript(TestCaseWithFactory):
 
     layer = LaunchpadScriptLayer
 
@@ -1156,6 +621,7 @@
         fixture = BuilddManagerTestSetup()
         fixture.setUp()
         fixture.tearDown()
+        self.layer.force_dirty_database()
 
     # XXX Julian 2010-08-06 bug=614275
     # These next 2 tests are in the wrong place, they should be near the

=== modified file 'lib/lp/buildmaster/tests/test_packagebuild.py'
--- lib/lp/buildmaster/tests/test_packagebuild.py	2010-10-02 11:41:43 +0000
+++ lib/lp/buildmaster/tests/test_packagebuild.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for `IPackageBuild`."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import datetime
@@ -99,6 +97,8 @@
         self.assertRaises(
             NotImplementedError, self.package_build.verifySuccessfulUpload)
         self.assertRaises(NotImplementedError, self.package_build.notify)
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         self.assertRaises(
             NotImplementedError, self.package_build.handleStatus,
             None, None, None)
@@ -311,6 +311,8 @@
         # A filemap with plain filenames should not cause a problem.
         # The call to handleStatus will attempt to get the file from
         # the slave resulting in a URL error in this test case.
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         self.build.handleStatus('OK', None, {
                 'filemap': {'myfile.py': 'test_file_hash'},
                 })
@@ -321,6 +323,8 @@
     def test_handleStatus_OK_absolute_filepath(self):
         # A filemap that tries to write to files outside of
         # the upload directory will result in a failed upload.
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         self.build.handleStatus('OK', None, {
             'filemap': {'/tmp/myfile.py': 'test_file_hash'},
             })
@@ -331,6 +335,8 @@
     def test_handleStatus_OK_relative_filepath(self):
         # A filemap that tries to write to files outside of
         # the upload directory will result in a failed upload.
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         self.build.handleStatus('OK', None, {
             'filemap': {'../myfile.py': 'test_file_hash'},
             })
@@ -341,6 +347,8 @@
         # The build log is set during handleStatus.
         removeSecurityProxy(self.build).log = None
         self.assertEqual(None, self.build.log)
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         self.build.handleStatus('OK', None, {
                 'filemap': {'myfile.py': 'test_file_hash'},
                 })
@@ -350,6 +358,8 @@
         # The date finished is updated during handleStatus_OK.
         removeSecurityProxy(self.build).date_finished = None
         self.assertEqual(None, self.build.date_finished)
+        # XXX 2010-10-18 bug=662631
+        # Change this to do non-blocking IO.
         self.build.handleStatus('OK', None, {
                 'filemap': {'myfile.py': 'test_file_hash'},
                 })

=== modified file 'lib/lp/code/browser/branchlisting.py'
--- lib/lp/code/browser/branchlisting.py	2010-10-20 16:04:58 +0000
+++ lib/lp/code/browser/branchlisting.py	2010-10-27 02:13:03 +0000
@@ -94,6 +94,7 @@
     PersonActiveReviewsView,
     PersonProductActiveReviewsView,
     )
+from lp.code.browser.branchvisibilitypolicy import BranchVisibilityPolicyMixin
 from lp.code.browser.summary import BranchCountSummaryView
 from lp.code.enums import (
     BranchLifecycleStatus,
@@ -532,7 +533,8 @@
             return "listing sortable"
 
 
-class BranchListingView(LaunchpadFormView, FeedsMixin):
+class BranchListingView(LaunchpadFormView, FeedsMixin,
+                        BranchVisibilityPolicyMixin):
     """A base class for views of branch listings."""
     schema = IBranchListingFilter
     field_names = ['lifecycle', 'sort_by']

=== modified file 'lib/lp/code/browser/branchvisibilitypolicy.py'
--- lib/lp/code/browser/branchvisibilitypolicy.py	2010-08-31 11:11:09 +0000
+++ lib/lp/code/browser/branchvisibilitypolicy.py	2010-10-27 02:13:03 +0000
@@ -8,6 +8,7 @@
 __all__ = [
     'AddBranchVisibilityTeamPolicyView',
     'RemoveBranchVisibilityTeamPolicyView',
+    'BranchVisibilityPolicyMixin',
     'BranchVisibilityPolicyView',
     ]
 
@@ -38,6 +39,8 @@
     BranchVisibilityRule,
     TeamBranchVisibilityRule,
     )
+from lp.code.interfaces.branchnamespace import IBranchNamespacePolicy
+from lp.code.interfaces.branchtarget import IBranchTarget
 from lp.code.interfaces.branchvisibilitypolicy import (
     IBranchVisibilityTeamPolicy,
     )
@@ -155,7 +158,24 @@
             self.context.removeTeamFromBranchVisibilityPolicy(item.team)
 
 
-class BranchVisibilityPolicyView(LaunchpadView):
+class BranchVisibilityPolicyMixin:
+    """Mixin class providing visibility rules."""
+    @property
+    def base_visibility_rule(self):
+        return self.context.getBaseBranchVisibilityRule()
+
+    @property
+    def team_policies(self):
+        """The policy items that have a valid team."""
+        return [item for item in self.items if item.team is not None]
+
+    @cachedproperty
+    def items(self):
+        return self.context.getBranchVisibilityTeamPolicies()
+
+
+class BranchVisibilityPolicyView(LaunchpadView,
+                                 BranchVisibilityPolicyMixin):
     """Simple view for displaying branch visibility policies."""
 
     @property
@@ -163,14 +183,6 @@
         name = self.context.displayname
         return 'Set branch visibility policy for %s' % name
 
-    @cachedproperty
-    def items(self):
-        return self.context.getBranchVisibilityTeamPolicies()
-
-    @property
-    def base_visibility_rule(self):
-        return self.context.getBaseBranchVisibilityRule()
-
     @property
     def can_remove_items(self):
         """You cannot remove items if using inherited policy or
@@ -178,8 +190,3 @@
         """
         return (len(self.items) > 0 and
                 not self.context.isUsingInheritedBranchVisibilityPolicy())
-
-    @property
-    def team_policies(self):
-        """The policy items that have a valid team."""
-        return [item for item in self.items if item.team is not None]

=== modified file 'lib/lp/code/browser/sourcepackagerecipe.py'
--- lib/lp/code/browser/sourcepackagerecipe.py	2010-10-03 15:30:06 +0000
+++ lib/lp/code/browser/sourcepackagerecipe.py	2010-10-27 02:13:03 +0000
@@ -77,7 +77,7 @@
     'We\'re still working on source package recipes. '
     'We would love for you to try them out, and if you have '
     'any issues, please '
-    '<a href="http://bugs.edge.launchpad.net/launchpad-code";>'
+    '<a href="http://bugs.launchpad.net/launchpad-code";>'
     'file a bug</a>.  We\'ll be happy to fix any problems you encounter.')
 
 

=== modified file 'lib/lp/code/browser/tests/test_branch.py'
--- lib/lp/code/browser/tests/test_branch.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/browser/tests/test_branch.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Unit tests for BranchView."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import (

=== modified file 'lib/lp/code/browser/tests/test_branchlisting.py'
--- lib/lp/code/browser/tests/test_branchlisting.py	2010-10-20 13:53:15 +0000
+++ lib/lp/code/browser/tests/test_branchlisting.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for branch listing."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import timedelta
@@ -33,11 +31,15 @@
     GroupedDistributionSourcePackageBranchesView,
     SourcePackageBranchesView,
     )
+from lp.code.enums import BranchVisibilityRule
 from lp.code.interfaces.seriessourcepackagebranch import (
     IMakeOfficialBranchLinks,
     )
 from lp.code.model.branch import Branch
-from lp.registry.interfaces.person import PersonVisibility
+from lp.registry.interfaces.person import (
+    IPersonSet,
+    PersonVisibility,
+    )
 from lp.registry.interfaces.pocket import PackagePublishingPocket
 from lp.registry.model.person import Owner
 from lp.registry.model.product import Product
@@ -51,6 +53,10 @@
     time_counter,
     )
 from lp.testing.factory import remove_security_proxy_and_shout_at_engineer
+from lp.testing.sampledata import (
+    ADMIN_EMAIL,
+    COMMERCIAL_ADMIN_EMAIL,
+    )
 from lp.testing.views import create_initialized_view
 
 
@@ -422,35 +428,91 @@
         self.assertIs(None, branches)
 
 
-class TestProjectBranchListing(TestCaseWithFactory):
+class TestProjectGroupBranches(TestCaseWithFactory):
+    """Test for the project group branches page."""
 
     layer = DatabaseFunctionalLayer
 
     def setUp(self):
-        super(TestProjectBranchListing, self).setUp()
+        TestCaseWithFactory.setUp(self)
         self.project = self.factory.makeProject()
-        self.product = self.factory.makeProduct(project=self.project)
+
+    def test_project_with_no_branch_visibility_rule(self):
+        view = create_initialized_view(
+            self.project, name="+branches", rootsite='code')
+        privacy_portlet = find_tag_by_id(view(), 'privacy')
+        text = extract_text(privacy_portlet)
+        expected = """
+            Inherited branch visibility for all projects in .* is Public.
+            """
+        self.assertTextMatchesExpressionIgnoreWhitespace(
+            expected, text)
+
+    def test_project_with_private_branch_visibility_rule(self):
+        self.project.setBranchVisibilityTeamPolicy(
+            None, BranchVisibilityRule.FORBIDDEN)
+        view = create_initialized_view(
+            self.project, name="+branches", rootsite='code')
+        privacy_portlet = find_tag_by_id(view(), 'privacy')
+        text = extract_text(privacy_portlet)
+        expected = """
+            Inherited branch visibility for all projects in .* is Forbidden.
+            """
+        self.assertTextMatchesExpressionIgnoreWhitespace(
+            expected, text)
+
+    def _testBranchVisibilityLink(self, user):
+        login_person(user)
+        view = create_initialized_view(
+            self.project, name="+branches", rootsite='code',
+            principal=user)
+        action_portlet = find_tag_by_id(view(), 'action-portlet')
+        text = extract_text(action_portlet)
+        expected = '.*Define branch visibility.*'
+        self.assertTextMatchesExpressionIgnoreWhitespace(
+            expected, text)
+
+    def test_branch_visibility_link_admin(self):
+        # An admin will be displayed a link to define branch visibility in the
+        # action portlet.
+        admin = getUtility(IPersonSet).getByEmail(ADMIN_EMAIL)
+        self._testBranchVisibilityLink(admin)
+
+    def test_branch_visibility_link_commercial_admin(self):
+        # A commercial admin will be displayed a link to define branch
+        # visibility in the action portlet.
+        admin = getUtility(IPersonSet).getByEmail(COMMERCIAL_ADMIN_EMAIL)
+        self._testBranchVisibilityLink(admin)
+
+    def test_branch_visibility_link_non_admin(self):
+        # A non-admin will not see the action portlet.
+        view = create_initialized_view(
+            self.project, name="+branches", rootsite='code')
+        action_portlet = find_tag_by_id(view(), 'action-portlet')
+        self.assertIs(None, action_portlet)
 
     def test_no_branches_gets_message_not_listing(self):
         # If there are no product branches on the project's products, then
         # the view shows the no code hosting message instead of a listing.
-        browser = self.getUserBrowser(
-            canonical_url(self.project, rootsite='code'))
+        self.factory.makeProduct(project=self.project)
+        view = create_initialized_view(
+            self.project, name='+branches', rootsite='code')
         displayname = self.project.displayname
         expected_text = normalize_whitespace(
-                            ("Launchpad does not know where any of %s's "
-                             "projects host their code." % displayname))
-        no_branch_div = find_tag_by_id(browser.contents, "no-branchtable")
+            ("Launchpad does not know where any of %s's "
+             "projects host their code." % displayname))
+        no_branch_div = find_tag_by_id(view(), "no-branchtable")
         text = normalize_whitespace(extract_text(no_branch_div))
         self.assertEqual(expected_text, text)
 
     def test_branches_get_listing(self):
         # If a product has a branch, then the project view has a branch
         # listing.
-        branch = self.factory.makeProductBranch(product=self.product)
-        browser = self.getUserBrowser(
-            canonical_url(self.project, rootsite='code'))
-        table = find_tag_by_id(browser.contents, "branchtable")
+        product = self.factory.makeProduct(project=self.project)
+        self.factory.makeProductBranch(product=product)
+        view = create_initialized_view(
+            self.project, name='+branches', rootsite='code')
+        table = find_tag_by_id(view(), "branchtable")
         self.assertIsNot(None, table)
 
 

=== modified file 'lib/lp/code/browser/tests/test_codereviewcomment.py'
--- lib/lp/code/browser/tests/test_codereviewcomment.py	2010-10-06 18:53:53 +0000
+++ lib/lp/code/browser/tests/test_codereviewcomment.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Unit tests for CodeReviewComments."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import unittest

=== modified file 'lib/lp/code/browser/tests/test_codereviewvote.py'
--- lib/lp/code/browser/tests/test_codereviewvote.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/browser/tests/test_codereviewvote.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Unit tests for CodeReviewVoteReferences."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 

=== modified file 'lib/lp/code/browser/tests/test_sourcepackagerecipe.py'
--- lib/lp/code/browser/tests/test_sourcepackagerecipe.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/browser/tests/test_sourcepackagerecipe.py	2010-10-27 02:13:03 +0000
@@ -4,8 +4,6 @@
 
 """Tests for the source package recipe view classes and templates."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 

=== modified file 'lib/lp/code/mail/codehandler.py'
--- lib/lp/code/mail/codehandler.py	2010-09-23 20:38:59 +0000
+++ lib/lp/code/mail/codehandler.py	2010-10-27 02:13:03 +0000
@@ -1,9 +1,6 @@
 # Copyright 2009 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
-
 __metaclass__ = type
 
 

=== modified file 'lib/lp/code/mail/tests/test_codehandler.py'
--- lib/lp/code/mail/tests/test_codehandler.py	2010-10-18 01:55:45 +0000
+++ lib/lp/code/mail/tests/test_codehandler.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Testing the CodeHandler."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from difflib import unified_diff

=== modified file 'lib/lp/code/model/branchmergeproposaljob.py'
--- lib/lp/code/model/branchmergeproposaljob.py	2010-10-18 01:57:32 +0000
+++ lib/lp/code/model/branchmergeproposaljob.py	2010-10-27 02:13:03 +0000
@@ -9,9 +9,6 @@
 """
 
 
-from __future__ import with_statement
-
-
 __metaclass__ = type
 
 

=== modified file 'lib/lp/code/model/directbranchcommit.py'
--- lib/lp/code/model/directbranchcommit.py	2010-10-06 11:46:51 +0000
+++ lib/lp/code/model/directbranchcommit.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Commit files straight to bzr branch."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = [
     'ConcurrentUpdateError',

=== modified file 'lib/lp/code/model/recipebuilder.py'
--- lib/lp/code/model/recipebuilder.py	2010-08-20 20:31:18 +0000
+++ lib/lp/code/model/recipebuilder.py	2010-10-27 02:13:03 +0000
@@ -117,38 +117,42 @@
             raise CannotBuild("Unable to find distroarchseries for %s in %s" %
                 (self._builder.processor.name,
                 self.build.distroseries.displayname))
-
+        args = self._extraBuildArgs(distroarchseries, logger)
         chroot = distroarchseries.getChroot()
         if chroot is None:
             raise CannotBuild("Unable to find a chroot for %s" %
                               distroarchseries.displayname)
-        self._builder.slave.cacheFile(logger, chroot)
-
-        # Generate a string which can be used to cross-check when obtaining
-        # results so we know we are referring to the right database object in
-        # subsequent runs.
-        buildid = "%s-%s" % (self.build.id, build_queue_id)
-        cookie = self.buildfarmjob.generateSlaveBuildCookie()
-        chroot_sha1 = chroot.content.sha1
-        logger.debug(
-            "Initiating build %s on %s" % (buildid, self._builder.url))
-
-        args = self._extraBuildArgs(distroarchseries, logger)
-        status, info = self._builder.slave.build(
-            cookie, "sourcepackagerecipe", chroot_sha1, {}, args)
-        message = """%s (%s):
-        ***** RESULT *****
-        %s
-        %s: %s
-        ******************
-        """ % (
-            self._builder.name,
-            self._builder.url,
-            args,
-            status,
-            info,
-            )
-        logger.info(message)
+        d = self._builder.slave.cacheFile(logger, chroot)
+
+        def got_cache_file(ignored):
+            # Generate a string which can be used to cross-check when obtaining
+            # results so we know we are referring to the right database object in
+            # subsequent runs.
+            buildid = "%s-%s" % (self.build.id, build_queue_id)
+            cookie = self.buildfarmjob.generateSlaveBuildCookie()
+            chroot_sha1 = chroot.content.sha1
+            logger.debug(
+                "Initiating build %s on %s" % (buildid, self._builder.url))
+
+            return self._builder.slave.build(
+                cookie, "sourcepackagerecipe", chroot_sha1, {}, args)
+
+        def log_build_result((status, info)):
+            message = """%s (%s):
+            ***** RESULT *****
+            %s
+            %s: %s
+            ******************
+            """ % (
+                self._builder.name,
+                self._builder.url,
+                args,
+                status,
+                info,
+                )
+            logger.info(message)
+
+        return d.addCallback(got_cache_file).addCallback(log_build_result)
 
     def verifyBuildRequest(self, logger):
         """Assert some pre-build checks.

=== modified file 'lib/lp/code/model/tests/test_branch.py'
--- lib/lp/code/model/tests/test_branch.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/model/tests/test_branch.py	2010-10-27 02:13:03 +0000
@@ -6,8 +6,6 @@
 """Tests for Branches."""
 
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import (

=== modified file 'lib/lp/code/model/tests/test_branchjob.py'
--- lib/lp/code/model/tests/test_branchjob.py	2010-10-07 22:46:08 +0000
+++ lib/lp/code/model/tests/test_branchjob.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for BranchJobs."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import datetime

=== modified file 'lib/lp/code/model/tests/test_branchlookup.py'
--- lib/lp/code/model/tests/test_branchlookup.py	2010-09-28 21:27:42 +0000
+++ lib/lp/code/model/tests/test_branchlookup.py	2010-10-27 02:13:03 +0000
@@ -310,7 +310,7 @@
         """test_getByURL works with production values."""
         branch_set = getUtility(IBranchLookup)
         branch = self.makeProductBranch()
-        self.pushConfig('codehosting', lp_url_hosts='edge,production,,')
+        self.pushConfig('codehosting', lp_url_hosts='production,,')
         branch2 = branch_set.getByUrl('lp://staging/~aa/b/c')
         self.assertIs(None, branch2)
         branch2 = branch_set.getByUrl('lp://asdf/~aa/b/c')
@@ -319,8 +319,6 @@
         self.assertEqual(branch, branch2)
         branch2 = branch_set.getByUrl('lp://production/~aa/b/c')
         self.assertEqual(branch, branch2)
-        branch2 = branch_set.getByUrl('lp://edge/~aa/b/c')
-        self.assertEqual(branch, branch2)
 
     def test_getByUrls(self):
         # getByUrls returns a dictionary mapping branches to URLs.

=== modified file 'lib/lp/code/model/tests/test_branchmergeproposal.py'
--- lib/lp/code/model/tests/test_branchmergeproposal.py	2010-10-19 00:44:24 +0000
+++ lib/lp/code/model/tests/test_branchmergeproposal.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 # pylint: disable-msg=F0401
 
-from __future__ import with_statement
-
 """Tests for BranchMergeProposals."""
 
 __metaclass__ = type

=== modified file 'lib/lp/code/model/tests/test_branchmergeproposaljobs.py'
--- lib/lp/code/model/tests/test_branchmergeproposaljobs.py	2010-10-20 01:17:42 +0000
+++ lib/lp/code/model/tests/test_branchmergeproposaljobs.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for branch merge proposal jobs."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import (

=== modified file 'lib/lp/code/model/tests/test_branchsubscription.py'
--- lib/lp/code/model/tests/test_branchsubscription.py	2010-08-20 20:31:18 +0000
+++ lib/lp/code/model/tests/test_branchsubscription.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for the BranchSubscrptions model object.."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import unittest

=== modified file 'lib/lp/code/model/tests/test_diff.py'
--- lib/lp/code/model/tests/test_diff.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/model/tests/test_diff.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for Diff, etc."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 

=== modified file 'lib/lp/code/model/tests/test_recipebuilder.py'
--- lib/lp/code/model/tests/test_recipebuilder.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/model/tests/test_recipebuilder.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Test RecipeBuildBehavior."""
 
-from __future__ import with_statement
-
 # pylint: disable-msg=F0401
 
 __metaclass__ = type

=== modified file 'lib/lp/code/model/tests/test_sourcepackagerecipe.py'
--- lib/lp/code/model/tests/test_sourcepackagerecipe.py	2010-09-22 21:03:48 +0000
+++ lib/lp/code/model/tests/test_sourcepackagerecipe.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for the SourcePackageRecipe content type."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import (

=== modified file 'lib/lp/code/model/tests/test_sourcepackagerecipebuild.py'
--- lib/lp/code/model/tests/test_sourcepackagerecipebuild.py	2010-10-06 11:46:51 +0000
+++ lib/lp/code/model/tests/test_sourcepackagerecipebuild.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for source package builds."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import datetime

=== modified file 'lib/lp/code/scripts/tests/test_scan_branches.py'
--- lib/lp/code/scripts/tests/test_scan_branches.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/scripts/tests/test_scan_branches.py	2010-10-27 02:13:03 +0000
@@ -6,8 +6,6 @@
 """Test the scan_branches script."""
 
 
-from __future__ import with_statement
-
 from storm.locals import Store
 import transaction
 

=== modified file 'lib/lp/code/scripts/tests/test_sendbranchmail.py'
--- lib/lp/code/scripts/tests/test_sendbranchmail.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/scripts/tests/test_sendbranchmail.py	2010-10-27 02:13:03 +0000
@@ -5,8 +5,6 @@
 
 """Test the sendbranchmail script"""
 
-from __future__ import with_statement
-
 import unittest
 
 import transaction

=== modified file 'lib/lp/code/stories/branches/xx-branch-visibility-policy.txt'
--- lib/lp/code/stories/branches/xx-branch-visibility-policy.txt	2009-07-20 18:22:54 +0000
+++ lib/lp/code/stories/branches/xx-branch-visibility-policy.txt	2010-10-27 02:13:03 +0000
@@ -1,7 +1,8 @@
-= Branch Visibility Policy Pages =
+Branch Visibility Policy Pages
+==============================
 
 Controlling the branch visibility policies for products and projects is only
-available to launchpad admins and launchpad commercial admins.
+available to Launchpad admins and Launchpad commercial admins.
 
 Not to anonymous people.
 
@@ -64,12 +65,36 @@
     >>> print commercial_browser.url
     http://launchpad.dev/firefox/+branchvisibility
 
-    >>> commercial_browser.getLink('Customise policy for Mozilla Firefox').click()
+    >>> commercial_browser.getLink(
+    ...     'Customise policy for Mozilla Firefox').click()
     >>> print commercial_browser.url
     http://launchpad.dev/firefox/+addbranchvisibilitypolicy
 
-
-== Default policies ==
+Admins can define branch visibility on projects, too.
+
+    >>> admin_browser.open('http://code.launchpad.dev/mozilla')
+    >>> admin_browser.getLink('Define branch visibility').click()
+    >>> print admin_browser.url
+    http://launchpad.dev/mozilla/+branchvisibility
+
+    >>> admin_browser.getLink('Set policy for a team').click()
+    >>> print admin_browser.url
+    http://launchpad.dev/mozilla/+addbranchvisibilitypolicy
+
+As can commercial admins.
+
+    >>> commercial_browser.open('http://code.launchpad.dev/mozilla')
+    >>> commercial_browser.getLink('Define branch visibility').click()
+    >>> print commercial_browser.url
+    http://launchpad.dev/mozilla/+branchvisibility
+
+    >>> commercial_browser.getLink('Set policy for a team').click()
+    >>> print commercial_browser.url
+    http://launchpad.dev/mozilla/+addbranchvisibilitypolicy
+
+
+Default policies
+----------------
 
 The default policies are to have all branches public.  When the branch policy
 objects are created for products they are constructed with the branch policy
@@ -79,10 +104,12 @@
 
     >>> admin_browser.open('http://launchpad.dev/firefox/+branchvisibility')
 
-    >>> print extract_text(find_tag_by_id(admin_browser.contents, 'inherited'))
+    >>> print extract_text(
+    ...     find_tag_by_id(admin_browser.contents, 'inherited'))
     Using inherited policy from the Mozilla Project.
 
-    >>> print extract_text(find_tag_by_id(admin_browser.contents, 'default-policy'))
+    >>> print extract_text(
+    ...     find_tag_by_id(admin_browser.contents, 'default-policy'))
     Default branch visibility for all branches in Mozilla Firefox is Public.
 
 When the project is using the inherited policy, the user can either
@@ -100,8 +127,11 @@
     >>> admin_browser.getLink('Edit inherited policy').click()
     >>> print find_tag_by_id(admin_browser.contents, 'inherited')
     None
-    >>> print extract_text(find_tag_by_id(admin_browser.contents, 'default-policy'))
-    Default branch visibility for all branches in the Mozilla Project is Public.
+    >>> print extract_text(
+    ...     find_tag_by_id(admin_browser.contents, 'default-policy'))
+    Default branch visibility for all branches
+    in the Mozilla Project is Public.
+
     >>> actions = find_tag_by_id(admin_browser.contents, 'policy-actions')
     >>> for anchor in actions.fetch('a'):
     ...     print '%s -> %s' % (anchor.renderContents(), anchor['href'])
@@ -109,11 +139,13 @@
 
 Products that don't have an associated project look similar to projects.
 
-    >>> admin_browser.open('http://launchpad.dev/alsa-utils/+branchvisibility')
+    >>> admin_browser.open(
+    ...     'http://launchpad.dev/alsa-utils/+branchvisibility')
 
     >>> print find_tag_by_id(admin_browser.contents, 'inherited')
     None
-    >>> print extract_text(find_tag_by_id(admin_browser.contents, 'default-policy'))
+    >>> print extract_text(
+    ...     find_tag_by_id(admin_browser.contents, 'default-policy'))
     Default branch visibility for all branches in alsa-utils is Public.
     >>> actions = find_tag_by_id(admin_browser.contents, 'policy-actions')
     >>> for anchor in actions.fetch('a'):
@@ -121,7 +153,8 @@
     Set policy for a team -> +addbranchvisibilitypolicy
 
 
-== Overriding the inherited policy ==
+Overriding the inherited policy
+-------------------------------
 
 Setting any policy item overrides the use of an inherited policy, even if
 it new policy item just specifies public branches for everyone.
@@ -173,7 +206,8 @@
     Ubuntu Gnome Team: Private
 
 
-== Removing policy items ==
+Removing policy items
+---------------------
 
 When removing the policy items, the defined items are shown as a list
 of checkboxes.  Any number of these can be selected, and when the
@@ -206,7 +240,8 @@
 Before we remove them, let's ensure that the commercial admins can see
 the removal page.
 
-    >>> commercial_browser.open('http://launchpad.dev/firefox/+branchvisibility')
+    >>> commercial_browser.open(
+    ...     'http://launchpad.dev/firefox/+branchvisibility')
     >>> commercial_browser.getLink('Remove policy items').click()
     >>> print commercial_browser.url
     http://launchpad.dev/firefox/+removebranchvisibilitypolicy
@@ -228,7 +263,8 @@
 Firefox will go back to inheriting the polices of Mozilla. Let's let
 the commercial admin do the removal to ensure he has the permission.
 
-    >>> commercial_browser.open('http://launchpad.dev/firefox/+branchvisibility')
+    >>> commercial_browser.open(
+    ...     'http://launchpad.dev/firefox/+branchvisibility')
     >>> commercial_browser.getLink('Remove policy items').click()
     >>> commercial_browser.getControl('Ubuntu Gnome Team: Private').click()
     >>> commercial_browser.getControl('Remove Selected Policy Items').click()

=== modified file 'lib/lp/code/templates/branch-visibility.pt'
--- lib/lp/code/templates/branch-visibility.pt	2009-08-24 02:09:05 +0000
+++ lib/lp/code/templates/branch-visibility.pt	2010-10-27 02:13:03 +0000
@@ -41,7 +41,7 @@
   </div>
 
   <div style="padding-left: 1em" id="policy-actions">
-  <tal:using-inhertied-policy condition="context/isUsingInheritedBranchVisibilityPolicy">
+  <tal:using-inherited-policy condition="context/isUsingInheritedBranchVisibilityPolicy">
       <p>
         <img src="/@@/edit" alt="edit" />
         <a tal:define="inherited_url context/project/fmt:url"
@@ -56,7 +56,7 @@
           </tal:displayname>
         </a>
       </p>
-  </tal:using-inhertied-policy>
+  </tal:using-inherited-policy>
 
   <tal:no-inhertied-policy condition="not: context/isUsingInheritedBranchVisibilityPolicy">
       <p>

=== modified file 'lib/lp/code/templates/project-branches.pt'
--- lib/lp/code/templates/project-branches.pt	2010-10-18 21:32:32 +0000
+++ lib/lp/code/templates/project-branches.pt	2010-10-27 02:13:03 +0000
@@ -3,20 +3,49 @@
   xmlns:tal="http://xml.zope.org/namespaces/tal";
   xmlns:metal="http://xml.zope.org/namespaces/metal";
   xmlns:i18n="http://xml.zope.org/namespaces/i18n";
-  metal:use-macro="view/macro:page/main_only"
+  metal:use-macro="view/macro:page/main_side"
   i18n:domain="launchpad">
 
   <body>
 
+    <metal:side fill-slot="side"
+                tal:define="context_menu context/menu:context"
+                tal:condition="not:
+                context/codehosting_usage/enumvalue:UNKNOWN">
+      <div id="privacy"
+           tal:define="priv not:view/base_visibility_rule/enumvalue:PUBLIC"
+           tal:attributes="class python: priv and 'first portlet private' or 'first portlet public'">
+        <p id="default-policy" style="margin-bottom: 0;">
+          Inherited branch visibility for all projects in
+          <strong tal:content="context/displayname">Project</strong> is
+          <strong tal:content="view/base_visibility_rule/title">Public</strong>.
+        </p>
+
+        <tal:has-policies condition="view/team_policies">
+          <p>Except for the following teams:</p>
+          <ul id="team-policies">
+            <li tal:repeat="item view/team_policies">
+              <tal:team replace="structure item/team/fmt:link:mainsite"
+                        condition="item/team">Team Name</tal:team>:
+              <tal:team condition="not: item/team">Everyone</tal:team>
+              <tal:policy replace="item/rule/title">Public</tal:policy>
+            </li>
+          </ul>
+        </tal:has-policies>
+      </div>
+
+      <div id="action-portlet"
+           class="portlet"
+           tal:define="menu context/menu:overview;
+                       link menu/branch_visibility"
+           tal:condition="link/enabled">
+        <div tal:content="structure link/render" />
+      </div>
+    </metal:side>
+
     <div metal:fill-slot="main"
          tal:define="branches view/branches">
 
-      <div style="float:right" id="floating-links"
-           tal:define="menu context/menu:overview">
-        <div tal:define="link menu/branch_visibility"
-             tal:condition="link/enabled"
-             tal:content="structure link/render" />
-      </div>
       <tal:no-branches
         condition="not:context/has_branches">
         <div id="no-branchtable">
@@ -35,7 +64,7 @@
               <li>
                 <a tal:attributes="href product/@@+code-index/configure_codehosting/fmt:url"
                    tal:content="product/title" />
-              </li>  
+              </li>
             </ul>
         </div>
         </div>

=== modified file 'lib/lp/code/xmlrpc/tests/test_branch.py'
--- lib/lp/code/xmlrpc/tests/test_branch.py	2010-10-04 19:50:45 +0000
+++ lib/lp/code/xmlrpc/tests/test_branch.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Unit tests for the public codehosting API."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = []
 

=== modified file 'lib/lp/codehosting/codeimport/tests/servers.py'
--- lib/lp/codehosting/codeimport/tests/servers.py	2010-09-02 12:09:05 +0000
+++ lib/lp/codehosting/codeimport/tests/servers.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Server classes that know how to create various kinds of foreign archive."""
 
-from __future__ import with_statement
-
 __all__ = [
     'CVSServer',
     'GitServer',

=== modified file 'lib/lp/codehosting/puller/tests/test_acceptance.py'
--- lib/lp/codehosting/puller/tests/test_acceptance.py	2010-10-04 19:50:45 +0000
+++ lib/lp/codehosting/puller/tests/test_acceptance.py	2010-10-27 02:13:03 +0000
@@ -28,13 +28,13 @@
 from zope.security.proxy import removeSecurityProxy
 
 from canonical.config import config
-from canonical.launchpad.interfaces import IScriptActivitySet
 from canonical.testing.layers import ZopelessAppServerLayer
 from lp.code.enums import BranchType
 from lp.code.interfaces.branchtarget import IBranchTarget
 from lp.codehosting.puller.tests import PullerBranchTestCase
 from lp.codehosting.tests.helpers import LoomTestMixin
 from lp.codehosting.vfs import get_lp_server
+from lp.services.scripts.interfaces.scriptactivity import IScriptActivitySet
 
 
 class TestBranchPuller(PullerBranchTestCase, LoomTestMixin):

=== modified file 'lib/lp/codehosting/scanner/tests/test_buglinks.py'
--- lib/lp/codehosting/scanner/tests/test_buglinks.py	2010-10-04 13:55:16 +0000
+++ lib/lp/codehosting/scanner/tests/test_buglinks.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for creating BugBranch items based on Bazaar revisions."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import unittest
@@ -15,12 +13,10 @@
 from zope.security.proxy import removeSecurityProxy
 
 from canonical.config import config
-from canonical.launchpad.interfaces import (
-    IBugBranchSet,
-    IBugSet,
-    )
 from canonical.testing.layers import LaunchpadZopelessLayer
 from lp.app.errors import NotFoundError
+from lp.bugs.interfaces.bug import IBugSet
+from lp.bugs.interfaces.bugbranch import IBugBranchSet
 from lp.code.interfaces.revision import IRevisionSet
 from lp.codehosting.scanner import events
 from lp.codehosting.scanner.buglinks import BugBranchLinker
@@ -36,14 +32,15 @@
 class RevisionPropertyParsing(TestCase):
     """Tests for parsing the bugs revision property.
 
-    The bugs revision property holds information about Launchpad bugs which are
-    affected by a revision. A given revision may affect multiple bugs in
-    different ways. A revision may indicate work has begin on a bug, or that it
-    constitutes a fix for a bug.
+    The bugs revision property holds information about Launchpad bugs which
+    are affected by a revision. A given revision may affect multiple bugs in
+    different ways. A revision may indicate work has begin on a bug, or that
+    it constitutes a fix for a bug.
 
-    The bugs property is formatted as a newline-separated list of entries. Each
-    entry is of the form '<bug_id> <status>', where '<bug_id>' is the URL for a
-    page that describes the bug, and status is one of 'fixed' or 'inprogress'.
+    The bugs property is formatted as a newline-separated list of entries.
+    Each entry is of the form '<bug_id> <status>', where '<bug_id>' is the URL
+    for a page that describes the bug, and status is one of 'fixed' or
+    'inprogress'.
 
     In general, the parser skips over any lines with errors.
 
@@ -129,8 +126,8 @@
         self.bug1.addTask(self.bug1.owner, distro)
         self.bug2 = self.factory.makeBug()
         self.new_db_branch = self.factory.makeAnyBranch()
-        removeSecurityProxy(distro).max_bug_heat = 0;
-        removeSecurityProxy(dsp).max_bug_heat = 0;
+        removeSecurityProxy(distro).max_bug_heat = 0
+        removeSecurityProxy(dsp).max_bug_heat = 0
         self.layer.txn.commit()
 
     def getBugURL(self, bug):

=== modified file 'lib/lp/codehosting/scanner/tests/test_bzrsync.py'
--- lib/lp/codehosting/scanner/tests/test_bzrsync.py	2010-10-15 18:33:07 +0000
+++ lib/lp/codehosting/scanner/tests/test_bzrsync.py	2010-10-27 02:13:03 +0000
@@ -5,8 +5,6 @@
 
 # pylint: disable-msg=W0141
 
-from __future__ import with_statement
-
 import datetime
 import os
 import random

=== modified file 'lib/lp/codehosting/scanner/tests/test_mergedetection.py'
--- lib/lp/codehosting/scanner/tests/test_mergedetection.py	2010-10-08 20:25:27 +0000
+++ lib/lp/codehosting/scanner/tests/test_mergedetection.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for the scanner's merge detection."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import logging
@@ -16,7 +14,7 @@
 from zope.event import notify
 
 from canonical.config import config
-from canonical.launchpad.interfaces import IStore
+from canonical.launchpad.interfaces.lpstorm import IStore
 from canonical.testing.layers import LaunchpadZopelessLayer
 from lp.code.enums import (
     BranchLifecycleStatus,
@@ -118,7 +116,8 @@
             BranchMergeProposalStatus.REJECTED,
             proposal.queue_status)
 
-    def test_auto_merge_proposals_rejected_proposal_target_scanned_first(self):
+    def test_auto_merge_proposals_rejected_proposal_target_scanned_first(
+                                                                        self):
         # If there is a merge proposal where the tip of the source is in the
         # ancestry of the target but the proposal is in a final state the
         # proposal is not marked as merged.

=== modified file 'lib/lp/codehosting/tests/servers.py'
--- lib/lp/codehosting/tests/servers.py	2010-10-04 13:55:16 +0000
+++ lib/lp/codehosting/tests/servers.py	2010-10-27 02:13:03 +0000
@@ -25,12 +25,11 @@
 from canonical.config import config
 from canonical.database.sqlbase import commit
 from canonical.launchpad.daemons.tachandler import TacTestSetup
-from canonical.launchpad.interfaces import (
+from lp.registry.interfaces.person import (
     IPersonSet,
-    ISSHKeySet,
-    SSHKeyType,
     TeamSubscriptionPolicy,
     )
+from lp.registry.interfaces.ssh import ISSHKeySet
 
 
 def set_up_test_user(test_user, test_team):
@@ -47,7 +46,7 @@
     testUser.join(testTeam)
     ssh_key_set = getUtility(ISSHKeySet)
     ssh_key_set.new(
-        testUser, 
+        testUser,
         'ssh-dss AAAAB3NzaC1kc3MAAABBAL5VoWG5sy3CnLYeOw47L8m9A15hA/PzdX2u'
         '0B7c2Z1ktFPcEaEuKbLqKVSkXpYm7YwKj9y88A9Qm61CdvI0c50AAAAVAKGY0YON'
         '9dEFH3DzeVYHVEBGFGfVAAAAQCoe0RhBcefm4YiyQVwMAxwTlgySTk7FSk6GZ95E'

=== modified file 'lib/lp/codehosting/tests/test_branchdistro.py'
--- lib/lp/codehosting/tests/test_branchdistro.py	2010-09-29 02:20:44 +0000
+++ lib/lp/codehosting/tests/test_branchdistro.py	2010-10-27 02:13:03 +0000
@@ -4,8 +4,6 @@
 """Tests for making new source package branches just after a distro release.
 """
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import os

=== modified file 'lib/lp/codehosting/tests/test_jobs.py'
--- lib/lp/codehosting/tests/test_jobs.py	2010-10-04 19:50:45 +0000
+++ lib/lp/codehosting/tests/test_jobs.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for Job-running facilities."""
 
-from __future__ import with_statement
-
 from unittest import TestLoader
 
 from canonical.config import config

=== modified file 'lib/lp/codehosting/tests/test_sftp.py'
--- lib/lp/codehosting/tests/test_sftp.py	2010-08-20 20:31:18 +0000
+++ lib/lp/codehosting/tests/test_sftp.py	2010-10-27 02:13:03 +0000
@@ -3,7 +3,6 @@
 
 """Tests for the transport-backed SFTP server implementation."""
 
-from __future__ import with_statement
 from contextlib import closing
 import os
 import unittest

=== modified file 'lib/lp/coop/answersbugs/tests/test_doc.py'
--- lib/lp/coop/answersbugs/tests/test_doc.py	2010-10-04 19:50:45 +0000
+++ lib/lp/coop/answersbugs/tests/test_doc.py	2010-10-27 02:13:03 +0000
@@ -5,9 +5,7 @@
 Run the doctests and pagetests.
 """
 
-import logging
 import os
-import unittest
 
 from zope.component import getUtility
 
@@ -19,14 +17,6 @@
     uploaderSetUp,
     uploadQueueSetUp,
     )
-from canonical.launchpad.interfaces import (
-    CreateBugParams,
-    IBugTaskSet,
-    IDistributionSet,
-    ILanguageSet,
-    IPersonSet,
-    )
-from canonical.launchpad.testing.pages import PageTestSuite
 from canonical.launchpad.testing.systemdocs import (
     LayeredDocFileSuite,
     setUp,
@@ -36,7 +26,12 @@
     DatabaseFunctionalLayer,
     LaunchpadZopelessLayer,
     )
+from lp.bugs.interfaces.bug import CreateBugParams
+from lp.bugs.interfaces.bugtask import IBugTaskSet
+from lp.registry.interfaces.distribution import IDistributionSet
+from lp.registry.interfaces.person import IPersonSet
 from lp.services.testing import build_test_suite
+from lp.services.worlddata.interfaces.language import ILanguageSet
 from lp.testing.mail_helpers import pop_notifications
 
 
@@ -70,10 +65,13 @@
     notifications = pop_notifications()
     return ubuntu_bugtask.id
 
+
 def bugLinkedToQuestionSetUp(test):
     """Setup the question and linked bug for testing."""
+
     def get_bugtask_linked_to_question():
         return getUtility(IBugTaskSet).get(bugtask_id)
+
     setUp(test)
     bugtask_id = _createUbuntuBugTaskLinkedToQuestion()
     test.globs['get_bugtask_linked_to_question'] = (
@@ -90,6 +88,7 @@
     uploaderSetUp(test)
     login(ANONYMOUS)
 
+
 def uploadQueueBugLinkedToQuestionSetUp(test):
     LaunchpadZopelessLayer.switchDbUser('launchpad')
     bugLinkedToQuestionSetUp(test)
@@ -104,26 +103,22 @@
             LayeredDocFileSuite(
             'notifications-linked-private-bug.txt',
             setUp=bugLinkedToQuestionSetUp, tearDown=tearDown,
-            layer=DatabaseFunctionalLayer
-            ),
+            layer=DatabaseFunctionalLayer),
     'notifications-linked-bug.txt': LayeredDocFileSuite(
             'notifications-linked-bug.txt',
             setUp=bugLinkedToQuestionSetUp, tearDown=tearDown,
-            layer=DatabaseFunctionalLayer
-            ),
+            layer=DatabaseFunctionalLayer),
     'notifications-linked-bug.txt-uploader':
             LayeredDocFileSuite(
                 'notifications-linked-bug.txt',
                 setUp=uploaderBugLinkedToQuestionSetUp,
                 tearDown=tearDown,
-                layer=LaunchpadZopelessLayer
-                ),
+                layer=LaunchpadZopelessLayer),
     'notifications-linked-bug.txt-queued': LayeredDocFileSuite(
             'notifications-linked-bug.txt',
             setUp=uploadQueueBugLinkedToQuestionSetUp,
             tearDown=tearDown,
-            layer=LaunchpadZopelessLayer
-            ),
+            layer=LaunchpadZopelessLayer),
     }
 
 

=== modified file 'lib/lp/registry/browser/person.py'
--- lib/lp/registry/browser/person.py	2010-10-14 20:20:47 +0000
+++ lib/lp/registry/browser/person.py	2010-10-27 02:13:03 +0000
@@ -206,10 +206,6 @@
 from canonical.launchpad.webapp.login import logoutPerson
 from canonical.launchpad.webapp.menu import get_current_view
 from canonical.launchpad.webapp.publisher import LaunchpadView
-from lp.app.browser.tales import (
-    DateTimeFormatterAPI,
-    PersonFormatterAPI,
-    )
 from canonical.lazr.utils import smartquote
 from canonical.widgets import (
     LaunchpadDropdownWidget,
@@ -226,6 +222,10 @@
 from lp.answers.interfaces.questionenums import QuestionParticipation
 from lp.answers.interfaces.questionsperson import IQuestionsPerson
 from lp.app.browser.stringformatter import FormattersAPI
+from lp.app.browser.tales import (
+    DateTimeFormatterAPI,
+    PersonFormatterAPI,
+    )
 from lp.app.errors import (
     NotFoundError,
     UnexpectedFormData,
@@ -311,7 +311,7 @@
 from lp.services.openid.interfaces.openidrpsummary import IOpenIDRPSummarySet
 from lp.services.propertycache import (
     cachedproperty,
-    IPropertyCache,
+    get_property_cache,
     )
 from lp.services.salesforce.interfaces import (
     ISalesforceVoucherProxy,
@@ -5736,7 +5736,7 @@
     def _reset_state(self):
         """Reset the cache because the recipients changed."""
         self._count_recipients = None
-        del IPropertyCache(self)._all_recipients
+        del get_property_cache(self)._all_recipients
 
     def _getPrimaryReason(self, person_or_team):
         """Return the primary reason enumeration.

=== modified file 'lib/lp/registry/browser/project.py'
--- lib/lp/registry/browser/project.py	2010-09-23 03:17:10 +0000
+++ lib/lp/registry/browser/project.py	2010-10-27 02:13:03 +0000
@@ -254,7 +254,7 @@
             'RDF</abbr> metadata')
         return Link('+rdf', text, icon='download-icon')
 
-    @enabled_with_permission('launchpad.Admin')
+    @enabled_with_permission('launchpad.Commercial')
     def branch_visibility(self):
         text = 'Define branch visibility'
         return Link('+branchvisibility', text, icon='edit', site='mainsite')

=== modified file 'lib/lp/registry/browser/tests/test_distroseriesdifference_views.py'
--- lib/lp/registry/browser/tests/test_distroseriesdifference_views.py	2010-09-28 08:30:16 +0000
+++ lib/lp/registry/browser/tests/test_distroseriesdifference_views.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Unit tests for the DistroSeriesDifference views."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from BeautifulSoup import BeautifulSoup

=== modified file 'lib/lp/registry/browser/tests/test_distroseriesdifference_webservice.py'
--- lib/lp/registry/browser/tests/test_distroseriesdifference_webservice.py	2010-09-29 09:53:15 +0000
+++ lib/lp/registry/browser/tests/test_distroseriesdifference_webservice.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import transaction

=== modified file 'lib/lp/registry/browser/tests/test_mailinglists.py'
--- lib/lp/registry/browser/tests/test_mailinglists.py	2010-08-20 20:31:18 +0000
+++ lib/lp/registry/browser/tests/test_mailinglists.py	2010-10-27 02:13:03 +0000
@@ -2,8 +2,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 """Test harness for mailinglist views unit tests."""
 
 __metaclass__ = type

=== modified file 'lib/lp/registry/browser/tests/test_milestone.py'
--- lib/lp/registry/browser/tests/test_milestone.py	2010-09-16 04:43:45 +0000
+++ lib/lp/registry/browser/tests/test_milestone.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 """Test milestone views."""
 
 __metaclass__ = type

=== modified file 'lib/lp/registry/browser/tests/test_peoplemerge.py'
--- lib/lp/registry/browser/tests/test_peoplemerge.py	2010-10-04 19:50:45 +0000
+++ lib/lp/registry/browser/tests/test_peoplemerge.py	2010-10-27 02:13:03 +0000
@@ -2,8 +2,6 @@
 # GNU Affero General Public License version 3 (see the file LICENSE).
 """Test the peoplemerge browser module."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from zope.component import getUtility

=== modified file 'lib/lp/registry/browser/tests/test_series_views.py'
--- lib/lp/registry/browser/tests/test_series_views.py	2010-10-06 18:53:53 +0000
+++ lib/lp/registry/browser/tests/test_series_views.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from BeautifulSoup import BeautifulSoup

=== modified file 'lib/lp/registry/doc/message-holds.txt'
--- lib/lp/registry/doc/message-holds.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/registry/doc/message-holds.txt	2010-10-27 02:13:03 +0000
@@ -292,10 +292,10 @@
     ...     print message_id, list_name, subject
 
     >>> def print_messages(status):
-    ...     held_messages = sorted(hold_set.getHeldMessagesWithStatus(status),
-    ...                            key=attrgetter('message_id'))
-    ...     for message_hold in held_messages:
-    ...         print_hold(message_hold)
+    ...     held_messages = sorted(hold_set.getHeldMessagesWithStatus(status))
+    ...     for message_id, team_name in held_messages:
+    ...         held_message = hold_set.getMessageByMessageID(message_id)
+    ...         print_hold(held_message)
 
 Here are all the messages pending approval...
 

=== modified file 'lib/lp/registry/doc/personlocation.txt'
--- lib/lp/registry/doc/personlocation.txt	2010-10-17 15:44:08 +0000
+++ lib/lp/registry/doc/personlocation.txt	2010-10-27 02:13:03 +0000
@@ -120,9 +120,9 @@
 have been pre-cached so that we don't hit the database everytime we
 access a person's .location property.
 
-    >>> from lp.services.propertycache import IPropertyCache
+    >>> from lp.services.propertycache import get_property_cache
     >>> for mapped in guadamen.getMappedParticipants():
-    ...     cache = IPropertyCache(mapped)
+    ...     cache = get_property_cache(mapped)
     ...     if ("location" not in cache or
     ...         not verifyObject(IPersonLocation, cache.location)):
     ...         print 'No cached location on %s' % mapped.name

=== modified file 'lib/lp/registry/doc/structural-subscriptions.txt'
--- lib/lp/registry/doc/structural-subscriptions.txt	2010-10-17 15:44:08 +0000
+++ lib/lp/registry/doc/structural-subscriptions.txt	2010-10-27 02:13:03 +0000
@@ -86,117 +86,6 @@
 When notifying subscribers of bug activity, both subscribers to the
 target and to the target's parent are notified.
 
-    >>> from canonical.launchpad.ftests import syncUpdate
-    >>> from lp.registry.enum import BugNotificationLevel
-    >>> from lp.registry.interfaces.structuralsubscription import BlueprintNotificationLevel
-    >>> from lp.bugs.mail.bugnotificationrecipients import (
-    ...     BugNotificationRecipients)
-
-We define some utility functions for printing out bug subscriptions and
-the recipients for the notifications they generate.
-
-    >>> def print_bug_subscribers(bug_subscribers):
-    ...     subscriber_names = sorted(subscriber.name
-    ...                               for subscriber in bug_subscribers)
-    ...     for name in subscriber_names:
-    ...         print name
-    >>> def print_bug_subscriptions(bug_subscriptions):
-    ...     for subscription in bug_subscriptions:
-    ...         print subscription.subscriber.name
-    >>> def print_bug_recipients(recipients):
-    ...     for recipient in recipients:
-    ...         reason = recipients.getReason(recipient)
-    ...         print '%s "%s"' % (recipient.name, reason[1])
-
-Sample person has a subscription to Ubuntu and to the Evolution package
-in Ubuntu. We set the bug notification level for both subscriptions.
-
-    >>> ubuntu_sub.bug_notification_level = BugNotificationLevel.COMMENTS
-    >>> evolution_sub.bug_notification_level = BugNotificationLevel.COMMENTS
-
-`getBugNotificationsRecipients` returns all the bug subscribers to the
-target and its parent, and adds the rationale for the subscriptions to
-the recipients set. Each subscriber is only added once.
-
-    >>> recipients = BugNotificationRecipients()
-    >>> bug_subscribers = evolution_package.getBugNotificationsRecipients(
-    ...     recipients=recipients)
-    >>> print_bug_subscriptions(ubuntu.bug_subscriptions)
-    name12
-    >>> print_bug_subscriptions(evolution_package.bug_subscriptions)
-    name12
-    >>> print_bug_subscribers(bug_subscribers)
-    name12
-    >>> print_bug_recipients(recipients)
-    name12 "Subscriber (evolution in ubuntu)"
-
-Foo Bar subscribes to Ubuntu.
-
-    >>> login('foo.bar@xxxxxxxxxxxxx')
-    >>> foobar_subscription = ubuntu.addBugSubscription(foobar, foobar)
-    >>> recipients = BugNotificationRecipients()
-
-The set of subscribers to the evolution package for ubuntu now includes
-both subscribers to the package, and subscribers to the distribution.
-
-    >>> bug_subscribers = evolution_package.getBugNotificationsRecipients(
-    ...     recipients=recipients)
-    >>> print_bug_recipients(recipients)
-    name16 "Subscriber (Ubuntu)"
-    name12 "Subscriber (evolution in ubuntu)"
-
-We can pass the parameter `level` to getBugNotificationsRecipients().
-Subscribers whose subscription level is lower than the given parameter
-are not returned.
-
-    >>> foobar_subscription.bug_notification_level = (
-    ...     BugNotificationLevel.METADATA)
-    >>> recipients = BugNotificationRecipients()
-    >>> bug_subscribers = evolution_package.getBugNotificationsRecipients(
-    ...     recipients=recipients, level=BugNotificationLevel.COMMENTS)
-    >>> print_bug_recipients(recipients)
-    name12 "Subscriber (evolution in ubuntu)"
-
-We remove Sample Person's bug subscription to the package.
-
-    >>> evolution_sub.blueprint_notification_level = (
-    ...     BlueprintNotificationLevel.METADATA)
-    >>> evolution_package.removeBugSubscription(sampleperson, sampleperson)
-    >>> ubuntu.removeBugSubscription(sampleperson, sampleperson)
-    >>> syncUpdate(evolution_sub)
-
-Sample Person is no longer a subscriber to the package, but Foo Bar
-is still a subscriber, by being subscribed to Ubuntu.
-
-    >>> print_bug_subscribers(
-    ...     evolution_package.getBugNotificationsRecipients(
-    ...         recipients=recipients))
-    name16
-
-A project is the parent of each of its products.
-
-Fireox does not have any subscribers.
-
-    >>> print_bug_subscribers(firefox.getBugNotificationsRecipients())
-
-Mozilla is the parent of Fireox.
-
-    >>> from lp.registry.interfaces.projectgroup import IProjectGroupSet
-    >>> mozilla = getUtility(IProjectGroupSet).getByName('mozilla')
-    >>> print firefox.parent_subscription_target.displayname
-    the Mozilla Project
-
-Foobar subscribes to bug notificatios for Mozilla.
-
-    >>> mozilla.addBugSubscription(foobar, foobar)
-    <StructuralSubscription at ...>
-
-As a result of subscribing to Mozilla, Foobar is now a subscriber of
-Firefox.
-
-    >>> print_bug_subscribers(firefox.getBugNotificationsRecipients())
-    name16
-
 
 Target type display
 ===================

=== modified file 'lib/lp/registry/doc/teammembership.txt'
--- lib/lp/registry/doc/teammembership.txt	2010-10-19 18:44:31 +0000
+++ lib/lp/registry/doc/teammembership.txt	2010-10-27 02:13:03 +0000
@@ -983,8 +983,8 @@
     >>> from canonical.launchpad.interfaces.lpstorm import IMasterObject
     >>> IMasterObject(bad_user.account).status = AccountStatus.SUSPENDED
     >>> IMasterObject(bad_user.preferredemail).status = EmailAddressStatus.OLD
-    >>> from lp.services.propertycache import IPropertyCache
-    >>> del IPropertyCache(removeSecurityProxy(bad_user)).preferredemail
+    >>> from lp.services.propertycache import get_property_cache
+    >>> del get_property_cache(removeSecurityProxy(bad_user)).preferredemail
     >>> transaction.commit()
 
     >>> [m.displayname for m in t3.allmembers]

=== modified file 'lib/lp/registry/errors.py'
--- lib/lp/registry/errors.py	2010-09-30 15:13:57 +0000
+++ lib/lp/registry/errors.py	2010-10-27 02:13:03 +0000
@@ -3,19 +3,20 @@
 
 __metaclass__ = type
 __all__ = [
-    'PrivatePersonLinkageError',
-    'NameAlreadyTaken',
-    'NoSuchDistroSeries',
-    'UserCannotChangeMembershipSilently',
-    'NoSuchSourcePackageName',
     'CannotTransitionToCountryMirror',
     'CountryMirrorAlreadySet',
+    'DeleteSubscriptionError',
+    'JoinNotAllowed',
     'MirrorNotOfficial',
     'MirrorHasNoHTTPURL',
     'MirrorNotProbed',
-    'DeleteSubscriptionError',
+    'NameAlreadyTaken',
+    'NoSuchDistroSeries',
+    'NoSuchSourcePackageName',
+    'PrivatePersonLinkageError',
+    'TeamMembershipTransitionError',
+    'UserCannotChangeMembershipSilently',
     'UserCannotSubscribePerson',
-    'TeamMembershipTransitionError',
     ]
 
 import httplib
@@ -114,3 +115,8 @@
     or an invalid transition (e.g. unicorn).
     """
     webservice_error(httplib.BAD_REQUEST)
+
+
+class JoinNotAllowed(Exception):
+    """User is not allowed to join a given team."""
+    webservice_error(httplib.BAD_REQUEST)

=== modified file 'lib/lp/registry/help/home-page-staging-help.html'
--- lib/lp/registry/help/home-page-staging-help.html	2010-01-12 16:16:27 +0000
+++ lib/lp/registry/help/home-page-staging-help.html	2010-10-27 02:13:03 +0000
@@ -18,8 +18,8 @@
     <p>However, there are a few things to note about staging:</p>
 
     <ul>
-      <li>Every 24 hours, staging's database is replaced with a fresh snapshot of Launchpad's production database: you will lose anything you do on staging.</li>
-      <li>Staging runs the latest bleeding edge code from the Launchpad developers &mdash; if things go wrong, <a target="_blank" href="https://bugs.launchpad.net/launchpad/+filebug";>please let us know</a>.</li>
+      <li>Every week, staging's database is replaced with a fresh snapshot of Launchpad's production database: you will lose anything you do on staging.</li>
+      <li>Staging runs the latest code with changed database models from the Launchpad developers &mdash; if things go wrong, <a target="_blank" href="https://bugs.launchpad.net/launchpad/+filebug";>please let us know</a>.</li>
       <li>You can't create a new account on staging &mdash; instead, create one in <a href="https://launchpad.net/";>Launchpad's production environment</a> and then wait up to 24 hours for your account to be available on staging.</li>
       <li>Staging does not send email.</li>
       <li>You can upload translations and templates but not export them &mdash; uploaded translations/templates will disappear after 24 hours.</li>

=== modified file 'lib/lp/registry/interfaces/distributionsourcepackage.py'
--- lib/lp/registry/interfaces/distributionsourcepackage.py	2010-08-23 16:51:11 +0000
+++ lib/lp/registry/interfaces/distributionsourcepackage.py	2010-10-27 02:13:03 +0000
@@ -176,7 +176,7 @@
             - The latest distroseries wins
             - updates > security > release
 
-        See https://bugs.edge.launchpad.net/soyuz/+bug/236922 for a plan
+        See https://bugs.launchpad.net/soyuz/+bug/236922 for a plan
         on how this criteria will be centrally encoded.
         """)
 

=== modified file 'lib/lp/registry/interfaces/mailinglist.py'
--- lib/lp/registry/interfaces/mailinglist.py	2010-09-17 03:58:05 +0000
+++ lib/lp/registry/interfaces/mailinglist.py	2010-10-27 02:13:03 +0000
@@ -808,6 +808,17 @@
         :rtype: sequence of MessageApproval
         """
 
+    def acknowledgeMessagesWithStatus(status):
+        """Acknowledge all the MessageApprovals with the matching status.
+
+        This changes the statuses APPROVAL_PENDING to APPROVED,
+        REJECTION_PENDING to REJECTED and DISCARD_PENDING to DISCARDED.  It is
+        illegal to call this function when the status is not one of these
+        states.
+
+        :param status: A PostedMessageStatus enum value.
+        """
+
 
 class IHeldMessageDetails(Interface):
     """Details on a held message.

=== modified file 'lib/lp/registry/interfaces/person.py'
--- lib/lp/registry/interfaces/person.py	2010-10-07 14:03:32 +0000
+++ lib/lp/registry/interfaces/person.py	2010-10-27 02:13:03 +0000
@@ -26,7 +26,6 @@
     'ITeamReassignment',
     'ImmutableVisibilityError',
     'InvalidName',
-    'JoinNotAllowed',
     'NoSuchPerson',
     'PersonCreationRationale',
     'PersonVisibility',
@@ -2146,10 +2145,6 @@
     """XMLRPC application root for ISoftwareCenterAgentAPI."""
 
 
-class JoinNotAllowed(Exception):
-    """User is not allowed to join a given team."""
-
-
 class ImmutableVisibilityError(Exception):
     """A change in team membership visibility is not allowed."""
 

=== modified file 'lib/lp/registry/interfaces/structuralsubscription.py'
--- lib/lp/registry/interfaces/structuralsubscription.py	2010-10-07 10:06:55 +0000
+++ lib/lp/registry/interfaces/structuralsubscription.py	2010-10-27 02:13:03 +0000
@@ -181,20 +181,6 @@
     def getSubscription(person):
         """Return the subscription for `person`, if it exists."""
 
-    def getBugNotificationsRecipients(recipients=None, level=None):
-        """Return the set of bug subscribers to this target.
-
-        :param recipients: If recipients is not None, a rationale
-            is added for each subscriber.
-        :type recipients: `INotificationRecipientSet`
-        'param level: If level is not None, only strucutral
-            subscribers with a subscrition level greater or equal
-            to the given value are returned.
-        :type level: `BugNotificationLevel`
-        :return: An `INotificationRecipientSet` instance containing
-            the bug subscribers.
-        """
-
     target_type_display = Attribute("The type of the target, for display.")
 
     def userHasBugSubscriptions(user):

=== modified file 'lib/lp/registry/model/distribution.py'
--- lib/lp/registry/model/distribution.py	2010-10-17 09:03:43 +0000
+++ lib/lp/registry/model/distribution.py	2010-10-27 02:13:03 +0000
@@ -151,7 +151,7 @@
     )
 from lp.services.propertycache import (
     cachedproperty,
-    IPropertyCache,
+    get_property_cache,
     )
 from lp.soyuz.enums import (
     ArchivePurpose,
@@ -1752,7 +1752,7 @@
 
         # May wish to add this to the series rather than clearing the cache --
         # RBC 20100816.
-        del IPropertyCache(self).series
+        del get_property_cache(self).series
 
         return series
 

=== modified file 'lib/lp/registry/model/distributionsourcepackage.py'
--- lib/lp/registry/model/distributionsourcepackage.py	2010-09-21 09:37:06 +0000
+++ lib/lp/registry/model/distributionsourcepackage.py	2010-10-27 02:13:03 +0000
@@ -235,7 +235,7 @@
         # latest relevant publication. It relies on ordering of status
         # and pocket enum values, which is arguably evil but much faster
         # than CASE sorting; at any rate this can be fixed when
-        # https://bugs.edge.launchpad.net/soyuz/+bug/236922 is.
+        # https://bugs.launchpad.net/soyuz/+bug/236922 is.
         spph = SourcePackagePublishingHistory.selectFirst("""
             SourcePackagePublishingHistory.distroseries = DistroSeries.id AND
             DistroSeries.distribution = %s AND

=== modified file 'lib/lp/registry/model/distroseriesdifference.py'
--- lib/lp/registry/model/distroseriesdifference.py	2010-09-28 14:42:41 +0000
+++ lib/lp/registry/model/distroseriesdifference.py	2010-10-27 02:13:03 +0000
@@ -41,11 +41,12 @@
     IDistroSeriesDifferenceCommentSource,
     )
 from lp.registry.model.distroseriesdifferencecomment import (
-    DistroSeriesDifferenceComment)
+    DistroSeriesDifferenceComment,
+    )
 from lp.registry.model.sourcepackagename import SourcePackageName
 from lp.services.propertycache import (
     cachedproperty,
-    IPropertyCacheManager,
+    clear_property_cache,
     )
 
 
@@ -189,7 +190,7 @@
         # won't cause a hard-to find bug if a script ever creates a
         # difference, copies/publishes a new version and then calls
         # update() (like the tests for this method do).
-        IPropertyCacheManager(self).clear()
+        clear_property_cache(self)
         self._updateType()
         updated = self._updateVersionsAndStatus()
         return updated

=== modified file 'lib/lp/registry/model/mailinglist.py'
--- lib/lp/registry/model/mailinglist.py	2010-09-17 03:58:05 +0000
+++ lib/lp/registry/model/mailinglist.py	2010-10-27 02:13:03 +0000
@@ -73,6 +73,7 @@
     EmailAddressStatus,
     IEmailAddressSet,
     )
+from canonical.launchpad.interfaces.lpstorm import IMasterStore
 from canonical.launchpad.webapp.interfaces import (
     IStoreSelector,
     MAIN_STORE,
@@ -887,7 +888,34 @@
 
     def getHeldMessagesWithStatus(self, status):
         """See `IMessageApprovalSet`."""
-        return MessageApproval.selectBy(status=status)
+        # Use the master store as the messages will also be acknowledged and
+        # we want to make sure we are acknowledging the same messages that we
+        # iterate over.
+        return IMasterStore(MessageApproval).find(
+            (Message.rfc822msgid, Person.name),
+            MessageApproval.status == status,
+            MessageApproval.message == Message.id,
+            MessageApproval.mailing_list == MailingList.id,
+            MailingList.team == Person.id)
+
+    def acknowledgeMessagesWithStatus(self, status):
+        """See `IMessageApprovalSet`."""
+        transitions = {
+            PostedMessageStatus.APPROVAL_PENDING:
+                PostedMessageStatus.APPROVED,
+            PostedMessageStatus.REJECTION_PENDING:
+                PostedMessageStatus.REJECTED,
+            PostedMessageStatus.DISCARD_PENDING:
+                PostedMessageStatus.DISCARDED,
+            }
+        try:
+            next_state = transitions[status]
+        except KeyError:
+            raise AssertionError(
+                'Not an acknowledgeable state: %s' % status)
+        approvals = IMasterStore(MessageApproval).find(
+            MessageApproval, MessageApproval.status == status)
+        approvals.set(status=next_state)
 
 
 class HeldMessageDetails:

=== modified file 'lib/lp/registry/model/person.py'
--- lib/lp/registry/model/person.py	2010-10-07 14:03:32 +0000
+++ lib/lp/registry/model/person.py	2010-10-27 02:13:03 +0000
@@ -1,7 +1,5 @@
 # Copyright 2009-2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
-from __future__ import with_statement
-
 # vars() causes W0612
 # pylint: disable-msg=E0611,W0212,W0612,C0322
 
@@ -191,7 +189,10 @@
     HasMergeProposalsMixin,
     HasRequestedReviewsMixin,
     )
-from lp.registry.errors import NameAlreadyTaken
+from lp.registry.errors import (
+    JoinNotAllowed,
+    NameAlreadyTaken,
+    )
 from lp.registry.interfaces.codeofconduct import ISignedCodeOfConductSet
 from lp.registry.interfaces.distribution import IDistribution
 from lp.registry.interfaces.gpg import IGPGKeySet
@@ -217,7 +218,6 @@
     IPerson,
     IPersonSet,
     ITeam,
-    JoinNotAllowed,
     PersonalStanding,
     PersonCreationRationale,
     PersonVisibility,
@@ -262,7 +262,7 @@
 from lp.services.openid.model.openididentifier import OpenIdIdentifier
 from lp.services.propertycache import (
     cachedproperty,
-    IPropertyCache,
+    get_property_cache,
     )
 from lp.services.salesforce.interfaces import (
     ISalesforceVoucherProxy,
@@ -309,7 +309,7 @@
     Note that it performs poorly at least some of the time, and if
     EmailAddress and Person are already being queried, its probably better to
     query Account directly. See bug
-    https://bugs.edge.launchpad.net/launchpad-registry/+bug/615237 for some
+    https://bugs.launchpad.net/launchpad-registry/+bug/615237 for some
     corroborating information.
     """
 
@@ -509,19 +509,19 @@
 
         :raises AttributeError: If the cache doesn't exist.
         """
-        return IPropertyCache(self).languages
+        return get_property_cache(self).languages
 
     def setLanguagesCache(self, languages):
         """Set this person's cached languages.
 
         Order them by name if necessary.
         """
-        IPropertyCache(self).languages = sorted(
+        get_property_cache(self).languages = sorted(
             languages, key=attrgetter('englishname'))
 
     def deleteLanguagesCache(self):
         """Delete this person's cached languages, if it exists."""
-        del IPropertyCache(self).languages
+        del get_property_cache(self).languages
 
     def addLanguage(self, language):
         """See `IPerson`."""
@@ -624,7 +624,7 @@
         """See `ISetLocation`."""
         assert not self.is_team, 'Cannot edit team location.'
         if self.location is None:
-            IPropertyCache(self).location = PersonLocation(
+            get_property_cache(self).location = PersonLocation(
                 person=self, visible=visible)
         else:
             self.location.visible = visible
@@ -643,7 +643,7 @@
             self.location.last_modified_by = user
             self.location.date_last_modified = UTC_NOW
         else:
-            IPropertyCache(self).location = PersonLocation(
+            get_property_cache(self).location = PersonLocation(
                 person=self, time_zone=time_zone, latitude=latitude,
                 longitude=longitude, last_modified_by=user)
 
@@ -1631,7 +1631,7 @@
             if not person:
                 return
             email = column
-            IPropertyCache(person).preferredemail = email
+            get_property_cache(person).preferredemail = email
 
         decorators.append(handleemail)
 
@@ -1645,7 +1645,7 @@
                 column is not None
                 # -- preferred email found
                 and person.preferredemail is not None)
-            IPropertyCache(person).is_valid_person = valid
+            get_property_cache(person).is_valid_person = valid
         decorators.append(handleaccount)
         return dict(
             joins=origins,
@@ -1735,7 +1735,7 @@
 
         def prepopulate_person(row):
             result = row[0]
-            cache = IPropertyCache(result)
+            cache = get_property_cache(result)
             index = 1
             #-- karma caching
             if need_karma:
@@ -1797,7 +1797,7 @@
         result = self._getMembersWithPreferredEmails()
         person_list = []
         for person, email in result:
-            IPropertyCache(person).preferredemail = email
+            get_property_cache(person).preferredemail = email
             person_list.append(person)
         return person_list
 
@@ -1932,7 +1932,7 @@
         # fetches the rows when they're needed.
         locations = self._getMappedParticipantsLocations(limit=limit)
         for location in locations:
-            IPropertyCache(location.person).location = location
+            get_property_cache(location.person).location = location
         participants = set(location.person for location in locations)
         # Cache the ValidPersonCache query for all mapped participants.
         if len(participants) > 0:
@@ -2074,7 +2074,7 @@
         self.account_status = AccountStatus.DEACTIVATED
         self.account_status_comment = comment
         IMasterObject(self.preferredemail).status = EmailAddressStatus.NEW
-        del IPropertyCache(self).preferredemail
+        del get_property_cache(self).preferredemail
         base_new_name = self.name + '-deactivatedaccount'
         self.name = self._ensureNewName(base_new_name)
 
@@ -2464,7 +2464,7 @@
         if email_address is not None:
             email_address.status = EmailAddressStatus.VALIDATED
             email_address.syncUpdate()
-        del IPropertyCache(self).preferredemail
+        del get_property_cache(self).preferredemail
 
     def setPreferredEmail(self, email):
         """See `IPerson`."""
@@ -2501,7 +2501,7 @@
         IMasterObject(email).syncUpdate()
 
         # Now we update our cache of the preferredemail.
-        IPropertyCache(self).preferredemail = email
+        get_property_cache(self).preferredemail = email
 
     @cachedproperty
     def preferredemail(self):
@@ -3075,7 +3075,7 @@
                 # Populate the previously empty 'preferredemail' cached
                 # property, so the Person record is up-to-date.
                 if master_email.status == EmailAddressStatus.PREFERRED:
-                    cache = IPropertyCache(account_person)
+                    cache = get_property_cache(account_person)
                     cache.preferredemail = master_email
                 return account_person
             # There is no associated `Person` to the email `Account`.

=== modified file 'lib/lp/registry/model/product.py'
--- lib/lp/registry/model/product.py	2010-10-07 22:14:07 +0000
+++ lib/lp/registry/model/product.py	2010-10-27 02:13:03 +0000
@@ -71,7 +71,6 @@
     MAIN_STORE,
     )
 from canonical.launchpad.webapp.sorting import sorted_version_numbers
-
 from lp.answers.interfaces.faqtarget import IFAQTarget
 from lp.answers.interfaces.questioncollection import (
     QUESTION_STATUS_DEFAULT_SEARCH,
@@ -161,7 +160,7 @@
 from lp.services.database.prejoin import prejoin
 from lp.services.propertycache import (
     cachedproperty,
-    IPropertyCache,
+    get_property_cache,
     )
 from lp.translations.interfaces.customlanguagecode import (
     IHasCustomLanguageCodes,
@@ -538,7 +537,7 @@
                 purchaser=purchaser,
                 sales_system_id=voucher,
                 whiteboard=whiteboard)
-            IPropertyCache(self).commercial_subscription = subscription
+            get_property_cache(self).commercial_subscription = subscription
         else:
             if current_datetime <= self.commercial_subscription.date_expires:
                 # Extend current subscription.

=== modified file 'lib/lp/registry/model/structuralsubscription.py'
--- lib/lp/registry/model/structuralsubscription.py	2010-10-07 10:06:55 +0000
+++ lib/lp/registry/model/structuralsubscription.py	2010-10-27 02:13:03 +0000
@@ -448,24 +448,6 @@
         return StructuralSubscription.select(
             query, orderBy='Person.displayname', clauseTables=['Person'])
 
-    def getBugNotificationsRecipients(self, recipients=None, level=None):
-        """See `IStructuralSubscriptionTarget`."""
-        if level is None:
-            subscriptions = self.bug_subscriptions
-        else:
-            subscriptions = self.getSubscriptions(
-                min_bug_notification_level=level)
-        subscribers = set(
-            subscription.subscriber for subscription in subscriptions)
-        if recipients is not None:
-            for subscriber in subscribers:
-                recipients.addStructuralSubscriber(subscriber, self)
-        parent = self.parent_subscription_target
-        if parent is not None:
-            subscribers.update(
-                parent.getBugNotificationsRecipients(recipients, level))
-        return subscribers
-
     @property
     def bug_subscriptions(self):
         """See `IStructuralSubscriptionTarget`."""

=== modified file 'lib/lp/registry/tests/mailinglists_helper.py'
--- lib/lp/registry/tests/mailinglists_helper.py	2010-09-20 19:21:57 +0000
+++ lib/lp/registry/tests/mailinglists_helper.py	2010-10-27 02:13:03 +0000
@@ -263,15 +263,10 @@
             mailing_list.transitionToStatus(MailingListStatus.ACTIVE)
         # Simulate acknowledging held messages.
         message_set = getUtility(IMessageApprovalSet)
-        message_ids = set()
         for status in (PostedMessageStatus.APPROVAL_PENDING,
                        PostedMessageStatus.REJECTION_PENDING,
                        PostedMessageStatus.DISCARD_PENDING):
-            for message in message_set.getHeldMessagesWithStatus(status):
-                message_ids.add(message.message_id)
-        for message_id in message_ids:
-            message = message_set.getMessageByMessageID(message_id)
-            message.acknowledge()
+            message_set.acknowledgeMessagesWithStatus(status)
 
 
 mailman = MailmanStub()

=== modified file 'lib/lp/registry/tests/test_add_member.py'
--- lib/lp/registry/tests/test_add_member.py	2010-10-04 19:50:45 +0000
+++ lib/lp/registry/tests/test_add_member.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Test team membership changes."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from canonical.testing.layers import DatabaseFunctionalLayer

=== modified file 'lib/lp/registry/tests/test_distribution.py'
--- lib/lp/registry/tests/test_distribution.py	2010-09-29 14:16:22 +0000
+++ lib/lp/registry/tests/test_distribution.py	2010-10-27 02:13:03 +0000
@@ -18,7 +18,7 @@
 from lp.registry.tests.test_distroseries import (
     TestDistroSeriesCurrentSourceReleases,
     )
-from lp.services.propertycache import IPropertyCache
+from lp.services.propertycache import get_property_cache
 from lp.soyuz.interfaces.distributionsourcepackagerelease import (
     IDistributionSourcePackageRelease,
     )
@@ -84,7 +84,7 @@
         distribution = removeSecurityProxy(
             self.factory.makeDistribution('foo'))
 
-        cache = IPropertyCache(distribution)
+        cache = get_property_cache(distribution)
 
         # Not yet cached.
         self.assertNotIn("series", cache)

=== modified file 'lib/lp/registry/tests/test_distroseries.py'
--- lib/lp/registry/tests/test_distroseries.py	2010-10-04 20:46:55 +0000
+++ lib/lp/registry/tests/test_distroseries.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Tests for distroseries."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import transaction

=== modified file 'lib/lp/registry/tests/test_distroseriesdifference.py'
--- lib/lp/registry/tests/test_distroseriesdifference.py	2010-10-06 18:53:53 +0000
+++ lib/lp/registry/tests/test_distroseriesdifference.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Model tests for the DistroSeriesDifference class."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import unittest
@@ -26,7 +24,7 @@
     IDistroSeriesDifference,
     IDistroSeriesDifferenceSource,
     )
-from lp.services.propertycache import IPropertyCacheManager
+from lp.services.propertycache import get_property_cache
 from lp.soyuz.interfaces.publishing import PackagePublishingStatus
 from lp.testing import (
     person_logged_in,
@@ -419,7 +417,7 @@
         ds_diff.source_pub
         ds_diff.parent_source_pub
 
-        cache = IPropertyCacheManager(ds_diff).cache
+        cache = get_property_cache(ds_diff)
 
         self.assertContentEqual(
             ['source_pub', 'parent_source_pub'], cache)

=== modified file 'lib/lp/registry/tests/test_mailinglist.py'
--- lib/lp/registry/tests/test_mailinglist.py	2010-10-04 19:50:45 +0000
+++ lib/lp/registry/tests/test_mailinglist.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2009 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = []
 

=== modified file 'lib/lp/registry/tests/test_mailinglistapi.py'
--- lib/lp/registry/tests/test_mailinglistapi.py	2010-10-04 19:50:45 +0000
+++ lib/lp/registry/tests/test_mailinglistapi.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Unit tests for the private MailingList API."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = []
 

=== modified file 'lib/lp/registry/tests/test_person.py'
--- lib/lp/registry/tests/test_person.py	2010-09-29 14:16:22 +0000
+++ lib/lp/registry/tests/test_person.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2009-2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 from datetime import datetime

=== modified file 'lib/lp/registry/tests/test_sourcepackage.py'
--- lib/lp/registry/tests/test_sourcepackage.py	2010-08-24 15:29:01 +0000
+++ lib/lp/registry/tests/test_sourcepackage.py	2010-10-27 02:13:03 +0000
@@ -3,8 +3,6 @@
 
 """Unit tests for ISourcePackage implementations."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import unittest

=== modified file 'lib/lp/registry/tests/test_team_webservice.py'
--- lib/lp/registry/tests/test_team_webservice.py	2010-10-04 20:46:55 +0000
+++ lib/lp/registry/tests/test_team_webservice.py	2010-10-27 02:13:03 +0000
@@ -4,14 +4,20 @@
 __metaclass__ = type
 
 import httplib
-import unittest
+
+from zope.security.proxy import removeSecurityProxy
 
 from lazr.restfulclient.errors import HTTPError
 
 from canonical.testing.layers import DatabaseFunctionalLayer
-from lp.registry.interfaces.person import PersonVisibility
+from lp.registry.interfaces.person import (
+    PersonVisibility,
+    TeamSubscriptionPolicy,
+    )
 from lp.testing import (
     launchpadlib_for,
+    login_person,
+    logout,
     TestCaseWithFactory,
     )
 
@@ -48,5 +54,43 @@
         self.assertEqual(httplib.FORBIDDEN, api_error.response.status)
 
 
-def test_suite():
-    return unittest.TestLoader().loadTestsFromName(__name__)
+class TestTeamJoining(TestCaseWithFactory):
+
+    layer = DatabaseFunctionalLayer
+
+    def test_restricted_rejects_membership(self):
+        # Calling person.join with a team that has a restricted membership
+        # subscription policy should raise an HTTP error with BAD_REQUEST
+        self.person = self.factory.makePerson(name='test-person')
+        self.team = self.factory.makeTeam(name='test-team')
+        login_person(self.team.teamowner)
+        self.team.subscriptionpolicy = TeamSubscriptionPolicy.RESTRICTED
+        logout()
+
+        launchpad = launchpadlib_for("test", self.person)
+        person = launchpad.people['test-person']
+        api_error = self.assertRaises(
+            HTTPError,
+            person.join,
+            team='test-team')
+        self.assertEqual(httplib.BAD_REQUEST, api_error.response.status)
+
+    def test_open_accepts_membership(self):
+        # Calling person.join with a team that has an open membership
+        # subscription policy should add that user to the team.
+        self.person = self.factory.makePerson(name='test-person')
+        self.team = self.factory.makeTeam(name='test-team')
+        login_person(self.team.teamowner)
+        self.team.subscriptionpolicy = TeamSubscriptionPolicy.OPEN
+        logout()
+
+        launchpad = launchpadlib_for("test", self.person)
+        test_person = launchpad.people['test-person']
+        test_team = launchpad.people['test-team']
+        test_person.join(team=test_team.self_link)
+        login_person(self.team.teamowner)
+        self.assertEqual(
+            ['test-team'],
+            [membership.team.name
+                for membership in self.person.team_memberships])
+        logout()

=== modified file 'lib/lp/registry/tests/test_xmlrpc.py'
--- lib/lp/registry/tests/test_xmlrpc.py	2010-08-25 14:14:58 +0000
+++ lib/lp/registry/tests/test_xmlrpc.py	2010-10-27 02:13:03 +0000
@@ -12,8 +12,8 @@
 from zope.security.proxy import removeSecurityProxy
 
 from canonical.functional import XMLRPCTestTransport
-from canonical.launchpad.interfaces import IPrivateApplication
 from canonical.launchpad.interfaces.account import AccountStatus
+from canonical.launchpad.interfaces.launchpad import IPrivateApplication
 from canonical.launchpad.webapp.servers import LaunchpadTestRequest
 from canonical.testing.layers import LaunchpadFunctionalLayer
 from lp.registry.interfaces.person import (

=== modified file 'lib/lp/registry/xmlrpc/mailinglist.py'
--- lib/lp/registry/xmlrpc/mailinglist.py	2010-10-03 15:30:06 +0000
+++ lib/lp/registry/xmlrpc/mailinglist.py	2010-10-27 02:13:03 +0000
@@ -275,9 +275,8 @@
             (PostedMessageStatus.DISCARD_PENDING, 'discard'),
             )
         for status, disposition in status_dispositions:
-            for held_message in message_set.getHeldMessagesWithStatus(status):
-                held_message.acknowledge()
-                response[held_message.message_id] = (
-                    removeSecurityProxy(held_message.mailing_list.team).name,
-                    disposition)
+            held_messages = message_set.getHeldMessagesWithStatus(status)
+            for message_id, team_name in held_messages:
+                response[message_id] = (team_name, disposition)
+            message_set.acknowledgeMessagesWithStatus(status)
         return response

=== modified file 'lib/lp/scripts/tests/test_garbo.py'
--- lib/lp/scripts/tests/test_garbo.py	2010-10-17 22:51:50 +0000
+++ lib/lp/scripts/tests/test_garbo.py	2010-10-27 02:13:03 +0000
@@ -32,13 +32,8 @@
 from canonical.launchpad.database.message import Message
 from canonical.launchpad.database.oauth import OAuthNonce
 from canonical.launchpad.database.openidconsumer import OpenIDConsumerNonce
-from canonical.launchpad.interfaces import IMasterStore
 from canonical.launchpad.interfaces.emailaddress import EmailAddressStatus
-from lp.scripts.garbo import (
-    DailyDatabaseGarbageCollector,
-    HourlyDatabaseGarbageCollector,
-    OpenIDConsumerAssociationPruner,
-    )
+from canonical.launchpad.interfaces.lpstorm import IMasterStore
 from canonical.launchpad.scripts.logger import QuietFakeLogger
 from canonical.launchpad.scripts.tests import run_script
 from canonical.launchpad.webapp.interfaces import (
@@ -71,6 +66,11 @@
     IPersonSet,
     PersonCreationRationale,
     )
+from lp.scripts.garbo import (
+    DailyDatabaseGarbageCollector,
+    HourlyDatabaseGarbageCollector,
+    OpenIDConsumerAssociationPruner,
+    )
 from lp.services.job.model.job import Job
 from lp.testing import (
     TestCase,
@@ -267,7 +267,8 @@
         machine = self.factory.makeCodeImportMachine()
         requester = self.factory.makePerson()
         # Create 6 code import events for this machine, 3 on each side of 30
-        # days. Use the event set to the extra event data rows get created too.
+        # days. Use the event set so the extra event data rows get created
+        # too.
         event_set = getUtility(ICodeImportEventSet)
         for age in (35, 33, 31, 29, 27, 15):
             event_set.newOnline(

=== modified file 'lib/lp/scripts/utilities/pageperformancereport.py'
--- lib/lp/scripts/utilities/pageperformancereport.py	2010-08-20 20:31:18 +0000
+++ lib/lp/scripts/utilities/pageperformancereport.py	2010-10-27 02:13:03 +0000
@@ -13,7 +13,10 @@
 import re
 import subprocess
 from textwrap import dedent
+import sqlite3
+import tempfile
 import time
+import warnings
 
 import numpy
 import simplejson as json
@@ -24,6 +27,9 @@
 from canonical.launchpad.scripts.logger import log
 from lp.scripts.helpers import LPOptionParser
 
+# We don't care about conversions to nan; they are expected.
+warnings.filterwarnings(
+    'ignore', '.*converting a masked element to nan.', UserWarning)
 
 class Request(zc.zservertracelog.tracereport.Request):
     url = None
@@ -52,19 +58,14 @@
 
     Requests belong to a Category if the URL matches a regular expression.
     """
-    def __init__(self, title, regexp, timeout):
+    def __init__(self, title, regexp):
         self.title = title
         self.regexp = regexp
         self._compiled_regexp = re.compile(regexp, re.I | re.X)
-        self.times = Times(timeout)
-
-    def add(self, request):
-        """Add a request to a Category if it belongs.
-
-        Does nothing if the request does not belong in this Category.
-        """
-        if self._compiled_regexp.search(request.url) is not None:
-            self.times.add(request)
+
+    def match(self, request):
+        """Return True when the request matches this category."""
+        return self._compiled_regexp.search(request.url) is not None
 
     def __cmp__(self, other):
         return cmp(self.title.lower(), other.title.lower())
@@ -81,7 +82,6 @@
     mean = 0 # Mean time per hit.
     median = 0 # Median time per hit.
     std = 0 # Standard deviation per hit.
-    var = 0 # Variance per hit.
     ninetyninth_percentile_time = 0
     histogram = None # # Request times histogram.
 
@@ -89,46 +89,16 @@
     mean_sqltime = 0 # Mean time spend waiting for SQL to process.
     median_sqltime = 0 # Median time spend waiting for SQL to process.
     std_sqltime = 0 # Standard deviation of SQL time.
-    var_sqltime = 0 # Variance of SQL time
 
     total_sqlstatements = 0 # Total number of SQL statements issued.
     mean_sqlstatements = 0
     median_sqlstatements = 0
     std_sqlstatements = 0
-    var_sqlstatements = 0
-
-empty_stats = Stats() # Singleton.
-
-
-class Times:
-    """Collection of request times."""
-    def __init__(self, timeout):
-        self.total_hits = 0
-        self.total_time = 0
-        self.request_times = []
-        self.sql_statements = []
-        self.sql_times = []
-        self.ticks = []
-        self.histogram_width = int(1.5*timeout)
-
-    def add(self, request):
-        """Add the application time from the request to the collection."""
-        self.total_hits += 1
-        self.total_time += request.app_seconds
-        self.request_times.append(request.app_seconds)
-        if request.sql_statements is not None:
-            self.sql_statements.append(request.sql_statements)
-        if request.sql_seconds is not None:
-            self.sql_times.append(request.sql_seconds)
-        if request.ticks is not None:
-            self.ticks.append(request.ticks)
-
-    _stats = None
-
-    def stats(self):
-        """Generate statistics about our request times.
-
-        Returns a `Stats` instance.
+
+    def __init__(self, times, timeout):
+        """Compute the stats based on times.
+
+        times is a list of (app_time, sql_statements, sql_time) tuples.
 
         The histogram is a list of request counts per 1 second bucket.
         ie. histogram[0] contains the number of requests taking between 0 and
@@ -136,67 +106,201 @@
         1 and 2 seconds etc. histogram is None if there are no requests in
         this Category.
         """
-        if not self.total_hits:
-            return empty_stats
-
-        if self._stats is not None:
-            return self._stats
-
-        stats = Stats()
-
-        stats.total_hits = self.total_hits
-
-        # Time stats
-        array = numpy.asarray(self.request_times, numpy.float32)
-        stats.total_time = numpy.sum(array)
-        stats.mean = numpy.mean(array)
-        stats.median = numpy.median(array)
-        stats.std = numpy.std(array)
-        stats.var = numpy.var(array)
+        if not times:
+            return
+
+        self.total_hits = len(times)
+
+        # Ignore missing values (-1) in computation.
+        times_array = numpy.ma.masked_values(
+            numpy.asarray(times, dtype=numpy.float32), -1.)
+
+        self.total_time, self.total_sqlstatements, self.total_sqltime = (
+            times_array.sum(axis=0))
+
+        self.mean, self.mean_sqlstatements, self.mean_sqltime = (
+            times_array.mean(axis=0))
+
+        self.median, self.median_sqlstatements, self.median_sqltime = (
+            numpy.median(times_array, axis=0))
+
+        self.std, self.std_sqlstatements, self.std_sqltime = (
+            numpy.std(times_array, axis=0))
+
         # This is an approximation which may not be true: we don't know if we
         # have a std distribution or not. We could just find the 99th
         # percentile by counting. Shock. Horror; however this appears pretty
         # good based on eyeballing things so far - once we're down in the 2-3
         # second range for everything we may want to revisit.
-        stats.ninetyninth_percentile_time = stats.mean + stats.std*3
-        capped_times = (min(a_time, self.histogram_width) for a_time in
-            self.request_times)
-        array = numpy.fromiter(capped_times, numpy.float32,
-            len(self.request_times))
+        self.ninetyninth_percentile_time = self.mean + self.std*3
+
+        histogram_width = int(timeout*1.5)
+        histogram_times = numpy.clip(times_array[:,0], 0, histogram_width)
         histogram = numpy.histogram(
-            array, normed=True,
-            range=(0, self.histogram_width), bins=self.histogram_width)
-        stats.histogram = zip(histogram[1], histogram[0])
-
-        # SQL time stats.
-        array = numpy.asarray(self.sql_times, numpy.float32)
-        stats.total_sqltime = numpy.sum(array)
-        stats.mean_sqltime = numpy.mean(array)
-        stats.median_sqltime = numpy.median(array)
-        stats.std_sqltime = numpy.std(array)
-        stats.var_sqltime = numpy.var(array)
-
-        # SQL query count.
-        array = numpy.asarray(self.sql_statements, numpy.int)
-        stats.total_sqlstatements = int(numpy.sum(array))
-        stats.mean_sqlstatements = numpy.mean(array)
-        stats.median_sqlstatements = numpy.median(array)
-        stats.std_sqlstatements = numpy.std(array)
-        stats.var_sqlstatements = numpy.var(array)
-
-        # Cache for next invocation.
-        self._stats = stats
-        return stats
-
-    def __str__(self):
-        results = self.stats()
-        total, mean, median, std, histogram = results
-        hstr = " ".join("%2d" % v for v in histogram)
-        return "%2.2f %2.2f %2.2f %s" % (
-            total, mean, median, std, hstr)
-
-    def __cmp__(self, b):
-        return cmp(self.total_time, b.total_time)
+            histogram_times, normed=True, range=(0, histogram_width),
+            bins=histogram_width)
+        self.histogram = zip(histogram[1], histogram[0])
+
+
+class SQLiteRequestTimes:
+    """SQLite-based request times computation."""
+
+    def __init__(self, categories, options):
+        if options.db_file is None:
+            fd, self.filename = tempfile.mkstemp(suffix='.db', prefix='ppr')
+            os.close(fd)
+        else:
+            self.filename = options.db_file
+        self.con = sqlite3.connect(self.filename, isolation_level='EXCLUSIVE')
+        log.debug('Using request database %s' % self.filename)
+        # Some speed optimization.
+        self.con.execute('PRAGMA synchronous = off')
+        self.con.execute('PRAGMA journal_mode = off')
+
+        self.categories = categories
+        self.store_all_request = options.pageids or options.top_urls
+        self.timeout = options.timeout
+        self.cur = self.con.cursor()
+
+        # Create the tables, ignore errors about them being already present.
+        try:
+            self.cur.execute('''
+                CREATE TABLE category_request (
+                    category INTEGER,
+                    time REAL,
+                    sql_statements INTEGER,
+                    sql_time REAL)
+                    ''');
+        except sqlite3.OperationalError, e:
+            if 'already exists' in str(e):
+                pass
+            else:
+                raise
+
+        if self.store_all_request:
+            try:
+                self.cur.execute('''
+                    CREATE TABLE request (
+                        pageid TEXT,
+                        url TEXT,
+                        time REAL,
+                        sql_statements INTEGER,
+                        sql_time REAL)
+                        ''');
+            except sqlite3.OperationalError, e:
+                if 'already exists' in str(e):
+                    pass
+                else:
+                    raise
+
+    def add_request(self, request):
+        """Add a request to the cache."""
+        sql_statements = request.sql_statements
+        sql_seconds = request.sql_seconds
+
+        # Store missing value as -1, as it makes dealing with those
+        # easier with numpy.
+        if sql_statements is None:
+            sql_statements = -1
+        if sql_seconds is None:
+            sql_seconds = -1
+        for idx, category in enumerate(self.categories):
+            if category.match(request):
+                self.con.execute(
+                    "INSERT INTO category_request VALUES (?,?,?,?)",
+                    (idx, request.app_seconds, sql_statements, sql_seconds))
+
+        if self.store_all_request:
+            pageid = request.pageid or 'Unknown'
+            self.con.execute(
+                "INSERT INTO request VALUES (?,?,?,?,?)", 
+                (pageid, request.url, request.app_seconds, sql_statements,
+                    sql_seconds))
+
+    def commit(self):
+        """Call commit on the underlying connection."""
+        self.con.commit()
+
+    def get_category_times(self):
+        """Return the times for each category."""
+        category_query = 'SELECT * FROM category_request ORDER BY category'
+
+        empty_stats = Stats([], 0)
+        categories = dict(self.get_times(category_query))
+        return [
+            (category, categories.get(idx, empty_stats))
+            for idx, category in enumerate(self.categories)]
+
+    def get_top_urls_times(self, top_n):
+        """Return the times for the top URLs by total time."""
+        top_url_query = '''
+            SELECT url, time, sql_statements, sql_time
+            FROM request WHERE url IN (
+                SELECT url FROM (SELECT url, sum(time) FROM request
+                    GROUP BY url
+                    ORDER BY sum(time) DESC
+                    LIMIT %d))
+            ORDER BY url
+        ''' % top_n
+        # Sort the result by total time
+        return sorted(
+            self.get_times(top_url_query), key=lambda x: x[1].total_time,
+            reverse=True)
+
+    def get_pageid_times(self):
+        """Return the times for the pageids."""
+        pageid_query = '''
+            SELECT pageid, time, sql_statements, sql_time
+            FROM request
+            ORDER BY pageid
+        '''
+        return self.get_times(pageid_query)
+
+    def get_times(self, query):
+        """Return a list of key, stats based on the query.
+
+        The query should return rows of the form:
+            [key, app_time, sql_statements, sql_times]
+
+        And should be sorted on key.
+        """
+        times = []
+        current_key = None
+        results = []
+        self.cur.execute(query)
+        while True:
+            rows = self.cur.fetchmany()
+            if len(rows) == 0:
+                break
+            for row in rows:
+                # We are encountering a new group...
+                if row[0] != current_key:
+                    # Compute the stats of the previous group
+                    if current_key != None:
+                        results.append(
+                            (current_key, Stats(times, self.timeout)))
+                    # Initialize the new group.
+                    current_key = row[0]
+                    times = []
+
+                times.append(row[1:])
+        # Compute the stats of the last group
+        if current_key != None:
+            results.append((current_key, Stats(times, self.timeout)))
+
+        return results
+
+    def close(self, remove=False):
+        """Close the SQLite connection.
+
+        :param remove: If true, the DB file will be removed.
+        """
+        self.con.close()
+        if remove:
+            log.debug('Deleting request database.')
+            os.unlink(self.filename)
+        else:
+            log.debug('Keeping request database %s.' % self.filename)
 
 
 def main():
@@ -235,13 +339,17 @@
         # Default to 12: the staging timeout.
         default=12, type="int",
         help="The configured timeout value : determines high risk page ids.")
+    parser.add_option(
+        "--db-file", dest="db_file",
+        default=None, metavar="FILE",
+        help="Do not parse the records, generate reports from the DB file.")
 
     options, args = parser.parse_args()
 
     if not os.path.isdir(options.directory):
         parser.error("Directory %s does not exist" % options.directory)
 
-    if len(args) == 0:
+    if len(args) == 0 and options.db_file is None:
         parser.error("At least one zserver tracelog file must be provided")
 
     if options.from_ts is not None and options.until_ts is not None:
@@ -266,7 +374,7 @@
     for option in script_config.options('categories'):
         regexp = script_config.get('categories', option)
         try:
-            categories.append(Category(option, regexp, options.timeout))
+            categories.append(Category(option, regexp))
         except sre_constants.error, x:
             log.fatal("Unable to compile regexp %r (%s)" % (regexp, x))
             return 1
@@ -275,18 +383,23 @@
     if len(categories) == 0:
         parser.error("No data in [categories] section of configuration.")
 
-    pageid_times = {}
-    url_times = {}
-
-    parse(args, categories, pageid_times, url_times, options)
-
-    # Truncate the URL times to the top N.
+    times = SQLiteRequestTimes(categories, options)
+
+    if len(args) > 0:
+        parse(args, times, options)
+        times.commit()
+
+    log.debug('Generating category statistics...')
+    category_times = times.get_category_times()
+
+    pageid_times = []
+    url_times= []
     if options.top_urls:
-        sorted_urls = sorted(
-            ((times, url) for url, times in url_times.items()
-                if times.total_hits > 0), reverse=True)
-        url_times = [(url, times)
-            for times, url in sorted_urls[:options.top_urls]]
+        log.debug('Generating top %d urls statistics...' % options.top_urls)
+        url_times = times.get_top_urls_times(options.top_urls)
+    if options.pageids:
+        log.debug('Generating pageid statistics...')
+        pageid_times = times.get_pageid_times()
 
     def _report_filename(filename):
         return os.path.join(options.directory, filename)
@@ -295,7 +408,7 @@
     if options.categories:
         report_filename = _report_filename('categories.html')
         log.info("Generating %s", report_filename)
-        html_report(open(report_filename, 'w'), categories, None, None)
+        html_report(open(report_filename, 'w'), category_times, None, None)
 
     # Pageid only report.
     if options.pageids:
@@ -313,7 +426,8 @@
     if options.categories and options.pageids:
         report_filename = _report_filename('combined.html')
         html_report(
-            open(report_filename, 'w'), categories, pageid_times, url_times)
+            open(report_filename, 'w'),
+            category_times, pageid_times, url_times)
 
     # Report of likely timeout candidates
     report_filename = _report_filename('timeout-candidates.html')
@@ -322,6 +436,7 @@
         open(report_filename, 'w'), None, pageid_times, None,
         options.timeout - 2)
 
+    times.close(options.db_file is None)
     return 0
 
 
@@ -363,7 +478,7 @@
         *(int(elem) for elem in match.groups() if elem is not None))
 
 
-def parse(tracefiles, categories, pageid_times, url_times, options):
+def parse(tracefiles, times, options):
     requests = {}
     total_requests = 0
     for tracefile in tracefiles:
@@ -444,35 +559,7 @@
                         log.debug("Parsed %d requests", total_requests)
 
                     # Add the request to any matching categories.
-                    if options.categories:
-                        for category in categories:
-                            category.add(request)
-
-                    # Add the request to the times for that pageid.
-                    if options.pageids:
-                        pageid = request.pageid
-                        try:
-                            times = pageid_times[pageid]
-                        except KeyError:
-                            times = Times(options.timeout)
-                            pageid_times[pageid] = times
-                        times.add(request)
-
-                    # Add the request to the times for that URL.
-                    if options.top_urls:
-                        url = request.url
-                        # Hack to remove opstats from top N report. This
-                        # should go into a config file if we end up with
-                        # more pages that need to be ignored because
-                        # they are just noise.
-                        if not (url is None or url.endswith('+opstats')):
-                            try:
-                                times = url_times[url]
-                            except KeyError:
-                                times = Times(options.timeout)
-                                url_times[url] = times
-                            times.add(request)
-
+                    times.add_request(request)
                 else:
                     raise MalformedLine('Unknown record type %s', record_type)
             except MalformedLine, x:
@@ -491,7 +578,6 @@
     elif prefix == 't':
         if len(args) != 4:
             raise MalformedLine("Wrong number of arguments %s" % (args,))
-        request.ticks = int(args[1])
         request.sql_statements = int(args[2])
         request.sql_seconds = float(args[3]) / 1000
     else:
@@ -500,12 +586,12 @@
 
 
 def html_report(
-    outf, categories, pageid_times, url_times,
+    outf, category_times, pageid_times, url_times,
     ninetyninth_percentile_threshold=None):
     """Write an html report to outf.
 
     :param outf: A file object to write the report to.
-    :param categories: Categories to report.
+    :param category_times: The time statistics for categories.
     :param pageid_times: The time statistics for pageids.
     :param url_times: The time statistics for the top XXX urls.
     :param ninetyninth_percentile_threshold: Lower threshold for inclusion of
@@ -575,20 +661,17 @@
 
             <th class="clickable">Mean Time (secs)</th>
             <th class="clickable">Time Standard Deviation</th>
-            <th class="clickable">Time Variance</th>
             <th class="clickable">Median Time (secs)</th>
             <th class="sorttable_nosort">Time Distribution</th>
 
             <th class="clickable">Total SQL Time (secs)</th>
             <th class="clickable">Mean SQL Time (secs)</th>
             <th class="clickable">SQL Time Standard Deviation</th>
-            <th class="clickable">SQL Time Variance</th>
             <th class="clickable">Median SQL Time (secs)</th>
 
             <th class="clickable">Total SQL Statements</th>
             <th class="clickable">Mean SQL Statements</th>
             <th class="clickable">SQL Statement Standard Deviation</th>
-            <th class="clickable">SQL Statement Variance</th>
             <th class="clickable">Median SQL Statements</th>
 
             </tr>
@@ -600,8 +683,7 @@
     # Store our generated histograms to output Javascript later.
     histograms = []
 
-    def handle_times(html_title, times):
-        stats = times.stats()
+    def handle_times(html_title, stats):
         histograms.append(stats.histogram)
         print >> outf, dedent("""\
             <tr>
@@ -611,7 +693,6 @@
             <td class="numeric 99pc_under">%.2f</td>
             <td class="numeric mean_time">%.2f</td>
             <td class="numeric std_time">%.2f</td>
-            <td class="numeric var_time">%.2f</td>
             <td class="numeric median_time">%.2f</td>
             <td>
                 <div class="histogram" id="histogram%d"></div>
@@ -619,30 +700,27 @@
             <td class="numeric total_sqltime">%.2f</td>
             <td class="numeric mean_sqltime">%.2f</td>
             <td class="numeric std_sqltime">%.2f</td>
-            <td class="numeric var_sqltime">%.2f</td>
             <td class="numeric median_sqltime">%.2f</td>
 
-            <td class="numeric total_sqlstatements">%d</td>
+            <td class="numeric total_sqlstatements">%.f</td>
             <td class="numeric mean_sqlstatements">%.2f</td>
             <td class="numeric std_sqlstatements">%.2f</td>
-            <td class="numeric var_sqlstatements">%.2f</td>
             <td class="numeric median_sqlstatements">%.2f</td>
             </tr>
             """ % (
                 html_title,
                 stats.total_hits, stats.total_time,
                 stats.ninetyninth_percentile_time,
-                stats.mean, stats.std, stats.var, stats.median,
+                stats.mean, stats.std, stats.median,
                 len(histograms) - 1,
                 stats.total_sqltime, stats.mean_sqltime,
-                stats.std_sqltime, stats.var_sqltime, stats.median_sqltime,
+                stats.std_sqltime, stats.median_sqltime,
                 stats.total_sqlstatements, stats.mean_sqlstatements,
-                stats.std_sqlstatements, stats.var_sqlstatements,
-                stats.median_sqlstatements))
+                stats.std_sqlstatements, stats.median_sqlstatements))
 
     # Table of contents
     print >> outf, '<ol>'
-    if categories:
+    if category_times:
         print >> outf, '<li><a href="#catrep">Category Report</a></li>'
     if pageid_times:
         print >> outf, '<li><a href="#pageidrep">Pageid Report</a></li>'
@@ -650,22 +728,21 @@
         print >> outf, '<li><a href="#topurlrep">Top URL Report</a></li>'
     print >> outf, '</ol>'
 
-    if categories:
+    if category_times:
         print >> outf, '<h2 id="catrep">Category Report</h2>'
         print >> outf, table_header
-        for category in categories:
+        for category, times in category_times:
             html_title = '%s<br/><span class="regexp">%s</span>' % (
                 html_quote(category.title), html_quote(category.regexp))
-            handle_times(html_title, category.times)
+            handle_times(html_title, times)
         print >> outf, table_footer
 
     if pageid_times:
         print >> outf, '<h2 id="pageidrep">Pageid Report</h2>'
         print >> outf, table_header
-        for pageid, times in sorted(pageid_times.items()):
-            pageid = pageid or 'None'
+        for pageid, times in pageid_times:
             if (ninetyninth_percentile_threshold is not None and
-                (times.stats().ninetyninth_percentile_time <
+                (times.ninetyninth_percentile_time <
                 ninetyninth_percentile_threshold)):
                 continue
             handle_times(html_quote(pageid), times)

=== modified file 'lib/lp/services/configure.zcml'
--- lib/lp/services/configure.zcml	2010-09-12 15:15:16 +0000
+++ lib/lp/services/configure.zcml	2010-10-27 02:13:03 +0000
@@ -3,9 +3,6 @@
 -->
 
 <configure xmlns="http://namespaces.zope.org/zope";>
-  <adapter factory=".propertycache.get_default_cache"/>
-  <adapter factory=".propertycache.PropertyCacheManager"/>
-  <adapter factory=".propertycache.DefaultPropertyCacheManager"/>
   <include package=".comments" />
   <include package=".database" />
   <include package=".features" />

=== modified file 'lib/lp/services/database/tests/test_prejoin.py'
--- lib/lp/services/database/tests/test_prejoin.py	2010-10-03 15:30:06 +0000
+++ lib/lp/services/database/tests/test_prejoin.py	2010-10-27 02:13:03 +0000
@@ -8,7 +8,7 @@
 
 import unittest
 
-from canonical.launchpad.interfaces import IMasterStore
+from canonical.launchpad.interfaces.lpstorm import IMasterStore
 from canonical.testing.layers import LaunchpadZopelessLayer
 from lp.registry.model.person import Person
 from lp.registry.model.product import Product

=== modified file 'lib/lp/services/doc/propertycache.txt'
--- lib/lp/services/doc/propertycache.txt	2010-09-06 09:11:43 +0000
+++ lib/lp/services/doc/propertycache.txt	2010-10-27 02:13:03 +0000
@@ -3,8 +3,9 @@
 
     >>> from lp.services.propertycache import (
     ...     cachedproperty,
+    ...     clear_property_cache,
+    ...     get_property_cache,
     ...     IPropertyCache,
-    ...     IPropertyCacheManager,
     ...     )
 
 Cached properties are for situations where a property is computed once
@@ -20,9 +21,19 @@
 
     >>> foo = Foo()
 
-The property cache can be obtained via adaption.
-
-    >>> cache = IPropertyCache(foo)
+The property cache can be obtained with `get_property_cache()`.
+
+    >>> cache = get_property_cache(foo)
+
+Calling `get_property_cache()` on a cache returns the cache:
+
+    >>> get_property_cache(cache) is cache
+    True
+
+Caches provide the `IPropertyCache` interface.
+
+    >>> IPropertyCache.providedBy(cache)
+    True
 
 Initially it is empty. Caches can be iterated over to reveal the names
 of the values cached within.
@@ -82,25 +93,27 @@
     >>> del cache.bar
     >>> del cache.bar
 
-A cache manager can be used to empty the cache.
-
-    >>> manager = IPropertyCacheManager(cache)
+The cache can be cleared with `clear_property_cache()`.
 
     >>> cache.bar = 123
     >>> cache.baz = 456
     >>> sorted(cache)
     ['bar', 'baz']
 
-    >>> manager.clear()
-    >>> list(cache)
-    []
-
-A cache manager can be obtained by adaption from non-cache objects
-too.
-
-    >>> manager = IPropertyCacheManager(foo)
-    >>> manager.cache is cache
-    True
+    >>> clear_property_cache(cache)
+    >>> list(cache)
+    []
+
+For convenience, the property cache for an object can also be cleared
+by passing the object itself into `clear_property_cache()`.
+
+    >>> cache.bar = 123
+    >>> list(cache)
+    ['bar']
+
+    >>> clear_property_cache(foo)
+    >>> list(cache)
+    []
 
 
 The cachedproperty decorator
@@ -134,7 +147,7 @@
 
     >>> foo.a
     1234
-    >>> IPropertyCache(foo).a_in_cache
+    >>> get_property_cache(foo).a_in_cache
     1234
 
 `b` was defined without an explicit name so it is known as "b" in the
@@ -149,5 +162,5 @@
 
     >>> foo.b
     5678
-    >>> IPropertyCache(foo).b
+    >>> get_property_cache(foo).b
     5678

=== modified file 'lib/lp/services/features/__init__.py'
--- lib/lp/services/features/__init__.py	2010-09-29 08:36:22 +0000
+++ lib/lp/services/features/__init__.py	2010-10-27 02:13:03 +0000
@@ -27,7 +27,7 @@
 Flags are defined by a I{name} that typically looks like a Python
 identifier, for example C{notification.global.text}.  A definition is
 given for a particular I{scope}, which also looks like a dotted identifier,
-for example C{user.beta} or C{server.edge}.  This is just a naming
+for example C{user.beta} or C{server.lpnet}.  This is just a naming
 convention, and they do not need to correspond to Python modules.
 
 The value is stored in the database as just a Unicode string, and it might

=== modified file 'lib/lp/services/features/browser/tests/test_feature_editor.py'
--- lib/lp/services/features/browser/tests/test_feature_editor.py	2010-09-29 07:37:38 +0000
+++ lib/lp/services/features/browser/tests/test_feature_editor.py	2010-10-27 02:13:03 +0000
@@ -6,37 +6,18 @@
 __metaclass__ = type
 
 
-from testtools.matchers import (
-    Equals,
-    )
-
+from testtools.matchers import Equals
 from zope.component import getUtility
 from zope.security.interfaces import Unauthorized
 
-from canonical.launchpad.interfaces import (
-    ILaunchpadCelebrities,
-    ILaunchpadRoot,
-    )
+from canonical.launchpad.interfaces.launchpad import ILaunchpadCelebrities
 from canonical.launchpad.webapp import canonical_url
+from canonical.launchpad.webapp.interfaces import ILaunchpadRoot
 from canonical.testing.layers import DatabaseFunctionalLayer
+from lp.services.features.rulesource import StormFeatureRuleSource
 from lp.testing import (
     BrowserTestCase,
-    TestCase,
-    TestCaseWithFactory,
-    celebrity_logged_in,
-    login_person,
     person_logged_in,
-    time_counter,
-    )
-
-from lp.services.features import (
-    get_relevant_feature_controller,
-    )
-from lp.services.features.browser.edit import (
-    FeatureControlView,
-    )
-from lp.services.features.rulesource import (
-    StormFeatureRuleSource,
     )
 
 
@@ -46,7 +27,7 @@
 
     def getUserBrowserAsTeamMember(self, teams):
         """Make a TestBrowser authenticated as a team member.
-        
+
         :param teams: List of teams to add the new user to.
         """
         # XXX MartinPool 2010-09-23 bug=646563: To make a UserBrowser, you
@@ -83,7 +64,7 @@
 
     def test_feature_page_from_database(self):
         StormFeatureRuleSource().setAllRules([
-            ('ui.icing', 'default',   100, u'3.0'),
+            ('ui.icing', 'default', 100, u'3.0'),
             ('ui.icing', 'beta_user', 300, u'4.0'),
             ])
         browser = self.getUserBrowserAsAdmin()

=== modified file 'lib/lp/services/features/tests/test_db_settings.py'
--- lib/lp/services/features/tests/test_db_settings.py	2010-08-20 20:31:18 +0000
+++ lib/lp/services/features/tests/test_db_settings.py	2010-10-27 02:13:03 +0000
@@ -4,7 +4,6 @@
 """Tests for feature settings coming from the database"""
 
 
-from __future__ import with_statement
 __metaclass__ = type
 
 from canonical.testing import layers

=== modified file 'lib/lp/services/features/tests/test_flags.py'
--- lib/lp/services/features/tests/test_flags.py	2010-09-28 09:29:46 +0000
+++ lib/lp/services/features/tests/test_flags.py	2010-10-27 02:13:03 +0000
@@ -4,7 +4,6 @@
 """Tests for feature flags."""
 
 
-from __future__ import with_statement
 __metaclass__ = type
 
 import os

=== modified file 'lib/lp/services/features/webapp.py'
--- lib/lp/services/features/webapp.py	2010-09-25 09:27:30 +0000
+++ lib/lp/services/features/webapp.py	2010-10-27 02:13:03 +0000
@@ -25,7 +25,7 @@
 
         Currently supports the following scopes:
          - default
-         - is_edge/is_lpnet etc (thunks through to the config)
+         - server.lpnet etc (thunks through to the config is_lpnet)
          - pageid:
            This scope works on a namespace model: for a page
            with pageid SomeType:+view#subselector

=== modified file 'lib/lp/services/geoip/doc/geoip.txt'
--- lib/lp/services/geoip/doc/geoip.txt	2010-09-11 19:54:26 +0000
+++ lib/lp/services/geoip/doc/geoip.txt	2010-10-27 02:13:03 +0000
@@ -108,5 +108,5 @@
     -23...
     >>> geoip_request.longitude
     -45...
-    >>> geoip_request.time_zone
-    'Brazil/Acre'
+    >>> geoip_request.time_zone in ('Brazil/Acre', 'America/Sao_Paulo')
+    True

=== modified file 'lib/lp/services/job/runner.py'
--- lib/lp/services/job/runner.py	2010-10-07 16:51:23 +0000
+++ lib/lp/services/job/runner.py	2010-10-27 02:13:03 +0000
@@ -4,7 +4,6 @@
 """Facilities for running Jobs."""
 
 
-from __future__ import with_statement
 __metaclass__ = type
 
 

=== modified file 'lib/lp/services/job/tests/test_runner.py'
--- lib/lp/services/job/tests/test_runner.py	2010-10-06 18:53:53 +0000
+++ lib/lp/services/job/tests/test_runner.py	2010-10-27 02:13:03 +0000
@@ -4,8 +4,6 @@
 """Tests for job-running facilities."""
 
 
-from __future__ import with_statement
-
 import sys
 from time import sleep
 from unittest import TestLoader

=== modified file 'lib/lp/services/log/tests/test_uniquefileallocator.py'
--- lib/lp/services/log/tests/test_uniquefileallocator.py	2010-10-13 16:35:06 +0000
+++ lib/lp/services/log/tests/test_uniquefileallocator.py	2010-10-27 02:13:03 +0000
@@ -3,7 +3,6 @@
 
 """Tests for the unique log naming facility."""
 
-from __future__ import with_statement
 __metaclass__ = type
 
 import datetime

=== modified file 'lib/lp/services/mail/tests/incomingmail.txt'
--- lib/lp/services/mail/tests/incomingmail.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/services/mail/tests/incomingmail.txt	2010-10-27 02:13:03 +0000
@@ -119,7 +119,7 @@
     Apologies for the inconvenience.
     <BLANKLINE>
     If this is blocking your work, please file a question at
-    https://answers.edge.launchpad.net/launchpad/+addquestion
+    https://answers.launchpad.net/launchpad/+addquestion
     and include the error ID OOPS-... in the description.
     ...
     From: Sample Person <test@xxxxxxxxxxxxx>
@@ -256,7 +256,7 @@
     Apologies for the inconvenience.
     <BLANKLINE>
     If this is blocking your work, please file a question at
-    https://answers.edge.launchpad.net/launchpad/+addquestion
+    https://answers.launchpad.net/launchpad/+addquestion
     and include the error ID OOPS-...TEMAIL... in the description.
     ...
     From: Foo Bar <foo.bar@xxxxxxxxxxxxx>
@@ -301,7 +301,7 @@
     Apologies for the inconvenience.
     <BLANKLINE>
     If this is blocking your work, please file a question at
-    https://answers.edge.launchpad.net/launchpad/+addquestion
+    https://answers.launchpad.net/launchpad/+addquestion
     and include the error ID OOPS-...TEMAIL... in the description.
     ...
     From: Foo Bar <foo.bar@xxxxxxxxxxxxx>

=== modified file 'lib/lp/services/mailman/tests/test_lpmoderate.py'
--- lib/lp/services/mailman/tests/test_lpmoderate.py	2010-10-06 11:46:51 +0000
+++ lib/lp/services/mailman/tests/test_lpmoderate.py	2010-10-27 02:13:03 +0000
@@ -2,8 +2,6 @@
 # GNU Affero General Public License version 3 (see the file LICENSE).
 """Test the lpmoderate monekypatches"""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 __all__ = []
 

=== modified file 'lib/lp/services/memcache/doc/tales-cache.txt'
--- lib/lp/services/memcache/doc/tales-cache.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/services/memcache/doc/tales-cache.txt	2010-10-27 02:13:03 +0000
@@ -32,8 +32,7 @@
 
 We also see some comments showing that we had a cache hit, and what the
 configuration was for it ("public").  These comments are only present
-when Launchpad's config is not configured as edge or production (``is_edge``
-or ``is_lpnet``, respectively).
+when Launchpad's config is not configured as production (``is_lpnet``).
 
 If we clear the cache, it will be rendered as expected.
 

=== modified file 'lib/lp/services/memcache/tales.py'
--- lib/lp/services/memcache/tales.py	2010-09-12 05:52:41 +0000
+++ lib/lp/services/memcache/tales.py	2010-10-27 02:13:03 +0000
@@ -274,7 +274,7 @@
         self._memcache_expr = memcache_expr
 
     def __call__(self, value):
-        if not config.launchpad.is_lpnet and not config.launchpad.is_edge:
+        if not config.launchpad.is_lpnet:
             # For debugging and testing purposes, prepend a description of
             # the memcache expression used to the stored value.
             rule = '%s [%s seconds]' % (self._memcache_expr, self._max_age)

=== modified file 'lib/lp/services/propertycache.py'
--- lib/lp/services/propertycache.py	2010-09-02 12:54:05 +0000
+++ lib/lp/services/propertycache.py	2010-10-27 02:13:03 +0000
@@ -10,24 +10,17 @@
 
 __metaclass__ = type
 __all__ = [
-    'IPropertyCache',
-    'IPropertyCacheManager',
     'cachedproperty',
+    'clear_property_cache',
+    'get_property_cache',
     ]
 
 from functools import partial
 
-from zope.component import (
-    adapter,
-    adapts,
-    getGlobalSiteManager,
-    )
 from zope.interface import (
-    implementer,
     implements,
     Interface,
     )
-from zope.schema import Object
 from zope.security.proxy import removeSecurityProxy
 
 
@@ -55,19 +48,6 @@
         """Iterate over the cached names."""
 
 
-class IPropertyCacheManager(Interface):
-
-    cache = Object(IPropertyCache)
-
-    def clear():
-        """Empty the cache."""
-
-
-# Register adapters with the global site manager so that they work even when
-# ZCML has not been executed.
-registerAdapter = getGlobalSiteManager().registerAdapter
-
-
 class DefaultPropertyCache:
     """A simple cache."""
 
@@ -89,56 +69,22 @@
         return iter(self.__dict__)
 
 
-@adapter(Interface)
-@implementer(IPropertyCache)
-def get_default_cache(target):
-    """Adapter to obtain a `DefaultPropertyCache` for any object."""
-    naked_target = removeSecurityProxy(target)
-    try:
-        return naked_target._property_cache
-    except AttributeError:
-        naked_target._property_cache = DefaultPropertyCache()
-        return naked_target._property_cache
-
-registerAdapter(get_default_cache)
-
-
-class PropertyCacheManager:
-    """A simple `IPropertyCacheManager`.
-
-    Should work for any `IPropertyCache` instance.
-    """
-
-    implements(IPropertyCacheManager)
-    adapts(Interface)
-
-    def __init__(self, target):
-        self.cache = IPropertyCache(target)
-
-    def clear(self):
-        """See `IPropertyCacheManager`."""
-        for name in list(self.cache):
-            delattr(self.cache, name)
-
-registerAdapter(PropertyCacheManager)
-
-
-class DefaultPropertyCacheManager:
-    """A `IPropertyCacheManager` specifically for `DefaultPropertyCache`.
-
-    The implementation of `clear` is more efficient.
-    """
-
-    implements(IPropertyCacheManager)
-    adapts(DefaultPropertyCache)
-
-    def __init__(self, cache):
-        self.cache = cache
-
-    def clear(self):
-        self.cache.__dict__.clear()
-
-registerAdapter(DefaultPropertyCacheManager)
+def get_property_cache(target):
+    """Obtain a `DefaultPropertyCache` for any object."""
+    if IPropertyCache.providedBy(target):
+        return target
+    else:
+        naked_target = removeSecurityProxy(target)
+        try:
+            return naked_target._property_cache
+        except AttributeError:
+            naked_target._property_cache = DefaultPropertyCache()
+            return naked_target._property_cache
+
+
+def clear_property_cache(target):
+    """Clear the property cache."""
+    get_property_cache(target).__dict__.clear()
 
 
 class CachedProperty:
@@ -163,7 +109,7 @@
     def __get__(self, instance, cls):
         if instance is None:
             return self
-        cache = IPropertyCache(instance)
+        cache = get_property_cache(instance)
         try:
             return getattr(cache, self.name)
         except AttributeError:
@@ -184,32 +130,3 @@
         name = name_or_function.__name__
         populate = name_or_function
         return CachedProperty(name=name, populate=populate)
-
-
-# XXX: GavinPanella 2010-09-02 bug=628762: There are some weird adaption
-# failures when running the full test suite. All that follows is a temporary,
-# mostly non-Zope, workaround.
-
-_IPropertyCache = IPropertyCache
-_IPropertyCacheManager = IPropertyCacheManager
-
-def IPropertyCache(target):
-    """Return the `IPropertyCache` for `target`.
-
-    Note: this is a workaround; see bug 628762.
-    """
-    if _IPropertyCache.providedBy(target):
-        return target
-    else:
-        return get_default_cache(target)
-
-def IPropertyCacheManager(target):
-    """Return the `IPropertyCacheManager` for `target`.
-
-    Note: this is a workaround; see bug 628762.
-    """
-    cache = IPropertyCache(target)
-    if isinstance(cache, DefaultPropertyCache):
-        return DefaultPropertyCacheManager(cache)
-    else:
-        return PropertyCacheManager(cache)

=== modified file 'lib/lp/services/spriteutils.py'
--- lib/lp/services/spriteutils.py	2010-08-20 20:31:18 +0000
+++ lib/lp/services/spriteutils.py	2010-10-27 02:13:03 +0000
@@ -6,8 +6,6 @@
 
 """Library to create sprites."""
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 __all__ = [

=== modified file 'lib/lp/services/utils.py'
--- lib/lp/services/utils.py	2010-08-20 20:31:18 +0000
+++ lib/lp/services/utils.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2009 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 """Generic Python utilities.
 
 Functions, lists and so forth. Nothing here that does system calls or network

=== modified file 'lib/lp/shipit.py'
--- lib/lp/shipit.py	2010-08-24 15:28:02 +0000
+++ lib/lp/shipit.py	2010-10-27 02:13:03 +0000
@@ -108,7 +108,7 @@
 from lp.services.mail.sendmail import simple_sendmail
 from lp.services.propertycache import (
     cachedproperty,
-    IPropertyCache,
+    get_property_cache,
     )
 from lp.services.scripts.base import (
     LaunchpadCronScript,
@@ -126,5 +126,3 @@
     )
 from lp.testing.factory import LaunchpadObjectFactory
 from lp.testing.publication import get_request_and_publication
-
-

=== modified file 'lib/lp/soyuz/browser/tests/test_archive_webservice.py'
--- lib/lp/soyuz/browser/tests/test_archive_webservice.py	2010-10-04 19:50:45 +0000
+++ lib/lp/soyuz/browser/tests/test_archive_webservice.py	2010-10-27 02:13:03 +0000
@@ -1,8 +1,6 @@
 # Copyright 2010 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 
-from __future__ import with_statement
-
 __metaclass__ = type
 
 import unittest

=== modified file 'lib/lp/soyuz/browser/tests/test_builder_views.py'
--- lib/lp/soyuz/browser/tests/test_builder_views.py	2010-10-04 19:50:45 +0000
+++ lib/lp/soyuz/browser/tests/test_builder_views.py	2010-10-27 02:13:03 +0000
@@ -34,7 +34,7 @@
         return view
 
     def test_posting_form_doesnt_call_slave_xmlrpc(self):
-        # Posting the +edit for should not call is_available, which
+        # Posting the +edit form should not call isAvailable, which
         # would do xmlrpc to a slave builder and is explicitly forbidden
         # in a webapp process.
         view = self.initialize_view()

=== removed file 'lib/lp/soyuz/doc/buildd-dispatching.txt'
--- lib/lp/soyuz/doc/buildd-dispatching.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/soyuz/doc/buildd-dispatching.txt	1970-01-01 00:00:00 +0000
@@ -1,371 +0,0 @@
-= Buildd Dispatching =
-
-    >>> import transaction
-    >>> import logging
-    >>> logger = logging.getLogger()
-    >>> logger.setLevel(logging.DEBUG)
-
-The buildd dispatching basically consists of finding a available
-slave in IDLE state, pushing any required files to it, then requesting
-that it starts the build procedure. These tasks are implemented by the
-BuilderSet and Builder classes.
-
-Setup the test builder:
-
-    >>> from canonical.buildd.tests import BuilddSlaveTestSetup
-    >>> fixture = BuilddSlaveTestSetup()
-    >>> fixture.setUp()
-
-Setup a suitable chroot for Hoary i386:
-
-    >>> from StringIO import StringIO
-    >>> from canonical.librarian.interfaces import ILibrarianClient
-    >>> librarian_client = getUtility(ILibrarianClient)
-
-    >>> content = 'anything'
-    >>> alias_id = librarian_client.addFile(
-    ...    'foo.tar.gz', len(content), StringIO(content), 'text/plain')
-
-    >>> from canonical.launchpad.interfaces.librarian import ILibraryFileAliasSet
-    >>> from lp.registry.interfaces.distribution import IDistributionSet
-    >>> from lp.registry.interfaces.pocket import PackagePublishingPocket
-
-    >>> hoary = getUtility(IDistributionSet)['ubuntu']['hoary']
-    >>> hoary_i386 = hoary['i386']
-
-    >>> chroot = getUtility(ILibraryFileAliasSet)[alias_id]
-    >>> pc = hoary_i386.addOrUpdateChroot(chroot=chroot)
-
-Activate builders present in sampledata, we need to be logged in as a
-member of launchpad-buildd-admin:
-
-    >>> from canonical.launchpad.ftests import login
-    >>> login('celso.providelo@xxxxxxxxxxxxx')
-
-Set IBuilder.builderok of all present builders:
-
-    >>> from lp.buildmaster.interfaces.builder import IBuilderSet
-    >>> builder_set = getUtility(IBuilderSet)
-
-    >>> builder_set.count()
-    2
-
-    >>> from canonical.launchpad.ftests import syncUpdate
-    >>> for b in builder_set:
-    ...     b.builderok = True
-    ...     syncUpdate(b)
-
-Clean up previous BuildQueue results from sampledata:
-
-    >>> from lp.buildmaster.interfaces.buildqueue import IBuildQueueSet
-    >>> lost_job = getUtility(IBuildQueueSet).get(1)
-    >>> lost_job.builder.name
-    u'bob'
-    >>> lost_job.destroySelf()
-    >>> transaction.commit()
-
-If the specified buildd slave reset command (used inside resumeSlaveHost())
-fails, the slave will still be marked as failed.
-
-    >>> from canonical.config import config
-    >>> reset_fail_config = '''
-    ...     [builddmaster]
-    ...     vm_resume_command: /bin/false'''
-    >>> config.push('reset fail', reset_fail_config)
-    >>> frog_builder = builder_set['frog']
-    >>> frog_builder.handleTimeout(logger, 'The universe just collapsed')
-    WARNING:root:Resetting builder: http://localhost:9221/ -- The universe just collapsed
-    ...
-    WARNING:root:Failed to reset builder: http://localhost:9221/ -- Resuming failed:
-    ...
-    WARNING:root:Disabling builder: http://localhost:9221/ -- The universe just collapsed
-    ...
-    <BLANKLINE>
-
-Since we were unable to reset the 'frog' builder it was marked as 'failed'.
-
-    >>> frog_builder.builderok
-    False
-
-Restore default value for resume command.
-
-    >>> ignored_config = config.pop('reset fail')
-
-The 'bob' builder is available for build jobs.
-
-    >>> bob_builder = builder_set['bob']
-    >>> bob_builder.name
-    u'bob'
-    >>> bob_builder.virtualized
-    False
-    >>> bob_builder.is_available
-    True
-    >>> bob_builder.builderok
-    True
-
-
-== Builder dispatching API ==
-
-Now let's check the build candidates which will be considered for the
-builder 'bob':
-
-    >>> from zope.security.proxy import removeSecurityProxy
-    >>> job = removeSecurityProxy(bob_builder)._findBuildCandidate()
-
-The single BuildQueue found is a non-virtual pending build:
-
-    >>> job.id
-    2
-    >>> from lp.soyuz.interfaces.binarypackagebuild import (
-    ...     IBinaryPackageBuildSet)
-    >>> build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(job)
-    >>> build.status.name
-    'NEEDSBUILD'
-    >>> job.builder is None
-    True
-    >>> job.date_started is None
-    True
-    >>> build.is_virtualized
-    False
-
-The build start time is not set yet either.
-
-    >>> print build.date_first_dispatched
-    None
-
-Update the SourcePackageReleaseFile corresponding to this job:
-
-    >>> content = 'anything'
-    >>> alias_id = librarian_client.addFile(
-    ...    'foo.dsc', len(content), StringIO(content), 'application/dsc')
-
-    >>> sprf = build.source_package_release.files[0]
-    >>> naked_sprf = removeSecurityProxy(sprf)
-    >>> naked_sprf.libraryfile = getUtility(ILibraryFileAliasSet)[alias_id]
-    >>> flush_database_updates()
-
-Check the dispatching method itself:
-
-    >>> dispatched_job = bob_builder.findAndStartJob()
-    >>> job == dispatched_job
-    True
-    >>> bob_builder.builderok = True
-
-    >>> flush_database_updates()
-
-Verify if the job (BuildQueue) was updated appropriately:
-
-    >>> job.builder.id == bob_builder.id
-    True
-
-    >>> dispatched_build = getUtility(
-    ...     IBinaryPackageBuildSet).getByQueueEntry(job)
-    >>> dispatched_build == build
-    True
-
-    >>> build.status.name
-    'BUILDING'
-
-Shutdown builder, mark the build record as failed and remove the
-buildqueue record, so the build was eliminated:
-
-    >>> fixture.tearDown()
-
-    >>> from lp.buildmaster.enums import BuildStatus
-    >>> build.status = BuildStatus.FAILEDTOBUILD
-    >>> job.destroySelf()
-    >>> flush_database_updates()
-
-
-== PPA build dispatching ==
-
-Create a new Build record of the same source targeted for a PPA archive:
-
-    >>> from lp.registry.interfaces.person import IPersonSet
-    >>> cprov = getUtility(IPersonSet).getByName('cprov')
-
-    >>> ppa_build = sprf.sourcepackagerelease.createBuild(
-    ...     hoary_i386, PackagePublishingPocket.RELEASE, cprov.archive)
-
-Create BuildQueue record and inspect some parameters:
-
-    >>> ppa_job = ppa_build.queueBuild()
-    >>> ppa_job.id
-    3
-    >>> ppa_job.builder == None
-    True
-    >>> ppa_job.date_started == None
-    True
-
-The build job's archive requires virtualized builds.
-
-    >>> build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(ppa_job)
-    >>> build.archive.require_virtualized
-    True
-
-But the builder is not virtualized.
-
-    >>> bob_builder.virtualized
-    False
-
-Hence, the builder will not be able to pick up the PPA build job created
-above.
-
-    >>> bob_builder.vm_host = 'localhost.ppa'
-    >>> syncUpdate(bob_builder)
-
-    >>> job = removeSecurityProxy(bob_builder)._findBuildCandidate()
-    >>> print job
-    None
-
-In order to enable 'bob' to find and build the PPA job, we have to
-change it to virtualized.  This is because PPA builds will only build
-on virtualized builders.  We also need to make sure this build's source
-is published, or it will also be ignored (by superseding it).  We can
-do this by copying the existing publication in Ubuntu.
-
-    >>> from lp.soyuz.model.publishing import (
-    ...     SourcePackagePublishingHistory)
-    >>> [old_pub] = SourcePackagePublishingHistory.selectBy(
-    ...    distroseries=build.distro_series,
-    ...    sourcepackagerelease=build.source_package_release)
-    >>> new_pub = old_pub.copyTo(
-    ...     old_pub.distroseries, old_pub.pocket, build.archive)
-
-    >>> bob_builder.virtualized = True
-    >>> syncUpdate(bob_builder)
-
-    >>> job = removeSecurityProxy(bob_builder)._findBuildCandidate()
-    >>> ppa_job.id == job.id
-    True
-
-For further details regarding IBuilder._findBuildCandidate() please see
-lib/lp/soyuz/tests/test_builder.py.
-
-Start buildd-slave to be able to dispatch jobs.
-
-    >>> fixture = BuilddSlaveTestSetup()
-    >>> fixture.setUp()
-
-Before dispatching we can check if the builder is protected against
-mistakes in code that results in a attempt to build a virtual job in
-a non-virtual build.
-
-    >>> bob_builder.virtualized = False
-    >>> flush_database_updates()
-    >>> removeSecurityProxy(bob_builder)._dispatchBuildCandidate(ppa_job)
-    Traceback (most recent call last):
-    ...
-    AssertionError: Attempt to build non-virtual item on a virtual builder.
-
-Mark the builder as virtual again, so we can dispatch the ppa job
-successfully.
-
-    >>> bob_builder.virtualized = True
-    >>> flush_database_updates()
-
-    >>> dispatched_job = bob_builder.findAndStartJob()
-    >>> ppa_job == dispatched_job
-    True
-
-    >>> flush_database_updates()
-
-PPA job is building.
-
-    >>> ppa_job.builder.name
-    u'bob'
-
-    >>> build.status.name
-    'BUILDING'
-
-Shutdown builder slave, mark the ppa build record as failed, remove the
-buildqueue record and make 'bob' builder non-virtual again,  so the
-environment is back to the initial state.
-
-    >>> fixture.tearDown()
-
-    >>> build.status = BuildStatus.FAILEDTOBUILD
-    >>> ppa_job.destroySelf()
-    >>> bob_builder.virtualized = False
-    >>> flush_database_updates()
-
-
-== Security build dispatching ==
-
-Setup chroot for warty/i386.
-
-    >>> warty = getUtility(IDistributionSet)['ubuntu']['warty']
-    >>> warty_i386 = warty['i386']
-    >>> pc = warty_i386.addOrUpdateChroot(chroot=chroot)
-
-Create a new Build record for test source targeted to warty/i386
-architecture and SECURITY pocket:
-
-    >>> sec_build = sprf.sourcepackagerelease.createBuild(
-    ...     warty_i386, PackagePublishingPocket.SECURITY, hoary.main_archive)
-
-Create BuildQueue record and inspect some parameters:
-
-    >>> sec_job = sec_build.queueBuild()
-    >>> sec_job.id
-    4
-    >>> print sec_job.builder
-    None
-    >>> print sec_job.date_started
-    None
-    >>> sec_build.is_virtualized
-    False
-
-In normal conditions the next available candidate would be the job
-targeted to SECURITY pocket. However, the builders are forbidden to
-accept such jobs until we have finished the EMBARGOED archive
-implementation.
-
-    >>> fixture = BuilddSlaveTestSetup()
-    >>> fixture.setUp()
-    >>> removeSecurityProxy(bob_builder)._dispatchBuildCandidate(sec_job)
-    Traceback (most recent call last):
-    ...
-    AssertionError: Soyuz is not yet capable of building SECURITY uploads.
-    >>> fixture.tearDown()
-
-To solve this problem temporarily until we start building security
-uploads, we will mark builds targeted to the SECURITY pocket as
-FAILEDTOBUILD during the _findBuildCandidate look-up.
-
-We will also create another build candidate in breezy-autotest/i386 to
-check if legitimate pending candidates will remain valid.
-
-    >>> breezy = getUtility(IDistributionSet)['ubuntu']['breezy-autotest']
-    >>> breezy_i386 = breezy['i386']
-    >>> pc = breezy_i386.addOrUpdateChroot(chroot=chroot)
-
-    >>> pending_build = sprf.sourcepackagerelease.createBuild(
-    ...     breezy_i386, PackagePublishingPocket.UPDATES, hoary.main_archive)
-    >>> pending_job = pending_build.queueBuild()
-
-We set the score of the security job to ensure it is considered
-before the legitimate job.
-
-    >>> sec_job.lastscore = pending_job.lastscore + 1
-    >>> flush_database_updates()
-
-New we can check that the next valid candidate is the just-added
-'pending_job', ensuring that it's published before doing so.
-
-    >>> new_pub = old_pub.copyTo(
-    ...     pending_build.distro_series, old_pub.pocket, pending_build.archive)
-    >>> candidate = removeSecurityProxy(bob_builder)._findBuildCandidate()
-    >>> flush_database_updates()
-    >>> candidate.id == pending_job.id
-    True
-
-And as expected, the security job was marked as FAILEDTOBUILD and the
-corresponding BuildQueue record was removed.  This way the security
-builds, created due to missing binary uploads from DAK, will be
-appropriately recorded and ignored.
-
-    >>> print sec_build.status.name
-    FAILEDTOBUILD
-    >>> print sec_build.buildqueue_record
-    None

=== removed file 'lib/lp/soyuz/doc/buildd-slavescanner.txt'
--- lib/lp/soyuz/doc/buildd-slavescanner.txt	2010-10-18 22:24:59 +0000
+++ lib/lp/soyuz/doc/buildd-slavescanner.txt	1970-01-01 00:00:00 +0000
@@ -1,876 +0,0 @@
-= Buildd Slave Scanner =
-
-The Buildd Slave scanner is able to run over the build jobs being
-processed in the current BuildFarm and collect information about the
-status of the process, collect the results of finished jobs and
-automatically dispatch new jobs to idle slaves.
-
-The Master side of Buildd requires access to the Launchpad Database; the
-user designated for this kind of access is 'fiera', as in all tests the
-transaction should be retrieved.
-
-    >>> from canonical.database.sqlbase import ZopelessTransactionManager
-    >>> local_transaction = ZopelessTransactionManager._installed
-
-We check for sent mails in some places, so load the stub mailer:
-
-    >>> from canonical.database.sqlbase import commit
-    >>> from lp.services.mail import stub
-
-And create a utility function to make tests easier to read.
-
-    >>> def check_mail_sent(last_stub_mail_count):
-    ...    commit()
-    ...    return len(stub.test_emails) == last_stub_mail_count + 3
-
-The master also requires a 'logging' instance so as not to compromise
-standard output with noisy output.
-
-    >>> import logging
-    >>> logger = logging.getLogger()
-
-Import MockBuilder and a series of MockSlaves to be used in this test.
-
-    >>> from lp.buildmaster.tests.mock_slaves import (
-    ...    AbortedSlave, AbortingSlave, BuildingSlave,
-    ...    LostBuildingBrokenSlave, MockBuilder, OkSlave, WaitingSlave)
-
-Slave-scanner will deactivate a 'lost-building' builder that could not
-be aborted appropriately.
-
-    >>> from zope.security.proxy import removeSecurityProxy
-    >>> from lp.buildmaster.interfaces.builder import CorruptBuildCookie
-    >>> from lp.testing.fakemethod import FakeMethod
-    >>> lostbuilding_builder = MockBuilder(
-    ...     'Lost Building Broken Slave', LostBuildingBrokenSlave())
-    >>> behavior = removeSecurityProxy(
-    ...     lostbuilding_builder.current_build_behavior)
-    >>> behavior.verifySlaveBuildCookie = FakeMethod(
-    ...     failure=CorruptBuildCookie("Hopelessly lost!"))
-
-    >>> lostbuilding_builder.updateStatus(logger)
-    Aborting slave
-    WARNING:root:Lost Building Broken Slave (http://fake:0000) marked as failed due to: <Fault 8002: 'Could not abort'>
-    Traceback (most recent call last):
-    ...
-    Fault: <Fault 8002: 'Could not abort'>
-
-The 'ensurePresent()' slave method always returns True; it theoretically
-means the slave has the requested file in cache.  In our MockBuilder
-we simply display the URL of the file we're asked to get from the
-librarian.  Typically the first file is always the chroot, which in
-the case of this doctest is a dummy alias pointing at netapplet (!) so
-it is not shown in each case below.
-
-The mock slaves will also print, when necessary, whether it has been
-passed an 'archives' property in the args dictionary.
-
-The archives are passed from the buildmaster and controls what archives
-exist in the apt sources.list.  If nothing is passed, the chroot's default
-list applies, otherwise the passed list is used.  This behavior is required
-in build slaves because some jobs may only depend on certain archives and
-hence certain package dependencies.
-
-The slavescanner system also performs build notification for the
-following states: FAILEDTOBUILD and CHROOTWAIT
-
-    >>> from lp.buildmaster.interfaces.builder import IBuilderSet
-    >>> from lp.soyuz.interfaces.binarypackagebuild import (
-    ...     IBinaryPackageBuildSet)
-    >>> import datetime, pytz
-
-    >>> UTC = pytz.timezone('UTC')
-
-We want to get a Build and make BuildQueue items for it:
-
-    >>> a_build = getUtility(IBinaryPackageBuildSet).getByBuildID(8)
-
-To make testing easier we provide a convenience function to put a BuildQueue
-object into a preset fixed state:
-
-    >>> default_start = datetime.datetime(2005, 1, 1, 8, 0, 0, tzinfo=UTC)
-    >>> def setupBuildQueue(build_queue, builder):
-    ...     build_queue.markAsBuilding(builder)
-
-Remove any previous buildmaster ROOT directory, to avoid any garbage
-lock conflict (it would be recreated automatically if necessary)
-
-    >>> from canonical.config import config
-    >>> import shutil
-    >>> import os
-    >>> if os.access(config.builddmaster.root, os.F_OK):
-    ...     shutil.rmtree(config.builddmaster.root)
-
-Let's check the procedures to verify/collect running build process:
-
-  WAITING - PACKAGEFAIL -> Package has failed to build, notice from
-  builder is stored, but Build.status is marked as 'Failed to Build':
-
-Get a builder from the sample data:
-
-    >>> a_builder = getUtility(IBuilderSet).get(1)
-
-Make sure that a_builder has no active builds:
-
-    >>> if a_builder.currentjob is not None:
-    ...     currentjob = a_builder.currentjob
-    ...     currentjob.setDateStarted(None)
-    ...     currentjob.builder = None
-
-Force the test builder to be 'ok' as the code required to do this
-automatically is not yet factored into the content class.
-
-    >>> a_builder.builderok = True
-
-Create a mock slave so the builder can operate - one with a failed package.
-
-    >>> a_builder.setSlaveForTesting(WaitingSlave('BuildStatus.PACKAGEFAIL'))
-
-    >>> bqItem3 = a_build.buildqueue_record
-    >>> setupBuildQueue(bqItem3, a_builder)
-    >>> last_stub_mail_count = len(stub.test_emails)
-
-Do the test execution:
-
-    >>> build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(bqItem3)
-    >>> a_builder.updateBuild(bqItem3)
-    >>> build.builder is not None
-    True
-    >>> build.date_finished is not None
-    True
-    >>> build.duration is not None
-    True
-    >>> build.log is not None
-    True
-    >>> check_mail_sent(last_stub_mail_count)
-    True
-    >>> build.status.title
-    'Failed to build'
-
-WAITING - DEPWAIT -> a required dependency is missing, again notice
-from builder, but Build.status has the right state:
-
-    >>> bqItem4 = a_build.queueBuild()
-    >>> setupBuildQueue(bqItem4, a_builder)
-    >>> last_stub_mail_count = len(stub.test_emails)
-
-Create a mock slave so the builder can operate - one with a dependency error.
-
-    >>> bqItem4.builder.setSlaveForTesting(
-    ...                        WaitingSlave('BuildStatus.DEPFAIL',
-    ...                                     'baz (>= 1.0.1)'))
-
-Do the test execution:
-
-    >>> build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(bqItem4)
-    >>> a_builder.updateBuild(bqItem4)
-    CRITICAL:slave-scanner:***** bob is MANUALDEPWAIT *****
-    >>> build.builder is not None
-    True
-    >>> build.date_finished is not None
-    True
-    >>> build.duration is not None
-    True
-    >>> build.log is not None
-    True
-    >>> check_mail_sent(last_stub_mail_count)
-    False
-    >>> build.dependencies
-    u'baz (>= 1.0.1)'
-    >>> build.status.title
-    'Dependency wait'
-
-WAITING - CHROOTFAIL -> the Chroot for this distroseries is damaged; no
-notice from the builder, but the right state is stored in the Build entry:
-
-    >>> bqItem5 = a_build.queueBuild()
-    >>> setupBuildQueue(bqItem5, a_builder)
-    >>> last_stub_mail_count = len(stub.test_emails)
-
-  Create a mock slave so the builder can operate - one with a failed chroot.
-
-    >>> bqItem5.builder.setSlaveForTesting(
-    ...     WaitingSlave('BuildStatus.CHROOTFAIL'))
-    >>> build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(bqItem5)
-    >>> a_builder.updateBuild(bqItem5)
-    CRITICAL:slave-scanner:***** bob is CHROOTWAIT *****
-    >>> build.builder is not None
-    True
-    >>> build.date_finished is not None
-    True
-    >>> build.duration is not None
-    True
-    >>> build.log is not None
-    True
-    >>> check_mail_sent(last_stub_mail_count)
-    True
-    >>> build.status.title
-    'Chroot problem'
-
-WAITING - BUILDERFAIL -> builder has failed by internal error, job is available for next build round:
-
-    >>> bqItem6 = a_build.queueBuild()
-    >>> bqItem6.markAsBuilding(a_builder)
-    >>> last_stub_mail_count = len(stub.test_emails)
-
-Create a mock slave so the builder can operate - one with a builder error.
-
-    >>> bqItem6.builder.setSlaveForTesting(
-    ...     WaitingSlave('BuildStatus.BUILDERFAIL'))
-
-    >>> a_builder.updateBuild(bqItem6)
-    WARNING:slave-scanner:***** bob has failed *****
-
-    >>> from canonical.launchpad.ftests import sync
-    >>> sync(a_builder)
-    >>> a_builder.failnotes
-    u'Builder returned BUILDERFAIL when asked for its status'
-
-    >>> bqItem6.builder is None
-    True
-    >>> check_mail_sent(last_stub_mail_count)
-    False
-    >>> build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(bqItem6)
-    >>> print build.status.title
-    Needs building
-    >>> job = bqItem6.specific_job.job
-    >>> print job.status.title
-    Waiting
-
-Cleanup in preparation for the next test:
-
-    >>> bqItem6.destroySelf()
-    >>> a_builder.builderok = True
-
-
-BUILDING -> builder still processing the job, simply collect the logtail:
-
-    >>> bqItem7 = a_build.queueBuild()
-    >>> setupBuildQueue(bqItem7, a_builder)
-    >>> last_stub_mail_count = len(stub.test_emails)
-
-Create a mock slave so the builder can operate - one which is building.
-
-    >>> bqItem7.builder.setSlaveForTesting(BuildingSlave())
-    >>> builder_id = bqItem7.builder.id
-    >>> a_builder.updateBuild(bqItem7)
-
-Due to updateBuild doing a commit we cannot compare the object instance.
-
-    >>> bqItem7.builder.id is builder_id
-    True
-    >>> check_mail_sent(last_stub_mail_count)
-    False
-    >>> bqItem7.logtail
-    u'This is a build log'
-
-Cleanup in preparation for the next test:
-
-    >>> bqItem7.destroySelf()
-
-ABORTED -> builder was aborted, release builder and reset job for the
-next build round:
-
-    >>> bqItem8 = a_build.queueBuild()
-    >>> setupBuildQueue(bqItem8, a_builder)
-    >>> last_stub_mail_count = len(stub.test_emails)
-
-    >>> bqItem8.builder.setSlaveForTesting(BuildingSlave())
-    >>> a_builder.updateBuild(bqItem8)
-    >>> bqItem8.builder.setSlaveForTesting(AbortedSlave())
-    >>> bqItem8.builder.name
-    u'bob'
-    >>> a_builder.updateBuild(bqItem8)
-    >>> bqItem8.builder is None
-    True
-    >>> print bqItem8.specific_job.build.status.name
-    NEEDSBUILD
-
-Cleanup in preparation for the next test:
-
-    >>> bqItem8.destroySelf()
-
-ABORTING -> builder is trying to terminate its children process, the
-only action master can perform is polling the slave status until it
-gets ABORTED.
-
-    >>> bqItem9 = a_build.queueBuild()
-    >>> setupBuildQueue(bqItem9, a_builder)
-    >>> last_stub_mail_count = len(stub.test_emails)
-
-    >>> bqItem9.builder.setSlaveForTesting(AbortingSlave())
-    >>> bqItem9.builder.name
-    u'bob'
-    >>> a_builder.updateBuild(bqItem9)
-    >>> check_mail_sent(last_stub_mail_count)
-    False
-    >>> bqItem9.logtail
-    u'Waiting for slave process to be terminated'
-
-Cleanup in preparation for the next test:
-
-    >>> bqItem9.destroySelf()
-
-
-== Builder WAITING in OK state ==
-
-This situation happens when the builder has finished the job and is
-waiting for the master to collect its results.
-
-The build record in question will end up as UPLOADING.
-
-=== Uploading (UPLOADING) ===
-
-    >>> bqItem10 = a_build.queueBuild()
-    >>> setupBuildQueue(bqItem10, a_builder)
-
-Create a mock slave so the builder gets the right responses for this test.
-
-    >>> bqItem10.builder.setSlaveForTesting(
-    ...     WaitingSlave('BuildStatus.OK'))
-
-The build will progress to the UPLOADING state if the status from
-the builder was OK:
-
-    >>> build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(bqItem10)
-    >>> a_builder.updateBuild(bqItem10)
-    >>> build.status.title
-    'Uploading build'
-
-=== Successfully collected and uploaded  (FULLYBUILT) ===
-
-Build item 6 has binary packages available in the sample data, letting us test
-this case cleanly. We need to set the pocket to updates for this test as it's
-uploading to warty.
-
-    >>> bqItem10 = getUtility(IBinaryPackageBuildSet).getByBuildID(
-    ...     6).queueBuild()
-    >>> build = getUtility(IBinaryPackageBuildSet).getByQueueEntry(bqItem10)
-
-XXX: The pocket attribute is not intended to be changed in regular code, but
-for this test we want to change it on the fly. An alternative would be to add
-new sample data for a build that can be uploaded with binary packages attached
-to it.
-
-    >

Follow ups