launchpad-reviewers team mailing list archive

[Merge] lp:~jtv/launchpad/buildfail-recipebuilder into lp:~launchpad/launchpad/recife

Jeroen T. Vermeulen has proposed merging lp:~jtv/launchpad/buildfail-recipebuilder into lp:~launchpad/launchpad/recife.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers): code


This fixes spurious failures in one test, test_messagesCanStayDiverged.  It also cleans up the test a bit, but the main point is that a bare switchDbUser apparently isn't always enough: lp.testing.TestCase.becomeDbUser also throws in a commit, and that is what does the trick.
-- 
https://code.launchpad.net/~jtv/launchpad/buildfail-recipebuilder/+merge/41280
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~jtv/launchpad/buildfail-recipebuilder into lp:~launchpad/launchpad/recife.
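
A minimal sketch of the pattern described above, assuming the usual lp.testing layer API; the helper bodies and role name are illustrative, not taken from the branch:

    import transaction

    # What the two helpers amount to (sketch; exact signatures in
    # lp.testing are assumptions, not checked against the tree).

    def switch_db_user(layer, user):
        # Reconnect as `user`.  Uncommitted work done as the previous
        # user may not yet be visible, hence the spurious failures.
        layer.switchDbUser(user)

    def become_db_user(layer, user):
        # becomeDbUser commits first, so the new connection sees the
        # fixture data -- that commit is what does the trick.
        transaction.commit()
        layer.switchDbUser(user)
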
=== modified file 'Makefile'
--- Makefile	2010-11-02 01:34:05 +0000
+++ Makefile	2010-11-19 10:27:10 +0000
@@ -45,6 +45,8 @@
     bin/start_librarian bin/stxdocs bin/tags bin/test bin/tracereport \
     bin/twistd bin/update-download-cache bin/windmill
 
+BUILDOUT_TEMPLATES = buildout-templates/_pythonpath.py.in
+
 # DO NOT ALTER : this should just build by default
 default: inplace
 
@@ -55,10 +57,10 @@
 newsampledata:
 	$(MAKE) -C database/schema newsampledata
 
-hosted_branches: $(PY)
+hosted_branches: buildout_bin
 	$(PY) ./utilities/make-dummy-hosted-branches
 
-$(API_INDEX): $(BZR_VERSION_INFO)
+$(API_INDEX): $(BZR_VERSION_INFO) buildout_bin
 	mkdir -p $(APIDOC_DIR).tmp
 	LPCONFIG=$(LPCONFIG) $(PY) ./utilities/create-lp-wadl-and-apidoc.py --force "$(WADL_TEMPLATE)"
 	mv $(APIDOC_DIR).tmp $(APIDOC_DIR)
@@ -66,12 +68,12 @@
 apidoc: compile $(API_INDEX)
 
 # Run by PQM.
-check_merge: $(PY)
+check_merge: buildout_bin
 	[ `PYTHONPATH= bzr status -S database/schema/ | \
 		grep -v "\(^P\|pending\|security.cfg\|Makefile\|unautovacuumable\|_pythonpath.py\)" | wc -l` -eq 0 ]
 	${PY} lib/canonical/tests/test_no_conflict_marker.py
 
-check_db_merge: $(PY)
+check_db_merge: buildout_bin
 	${PY} lib/canonical/tests/test_no_conflict_marker.py
 
 check_config: build
@@ -109,16 +111,16 @@
 	${PY} -t ./test_on_merge.py $(VERBOSITY) $(TESTOPTS) \
 		--layer=MailmanLayer
 
-lint: ${PY}
+lint: buildout_bin
 	@bash ./bin/lint.sh
 
-lint-verbose: ${PY}
+lint-verbose: buildout_bin
 	@bash ./bin/lint.sh -v
 
-xxxreport: $(PY)
+xxxreport: buildout_bin
 	${PY} -t ./utilities/xxxreport.py -f csv -o xxx-report.csv ./
 
-check-configs: $(PY)
+check-configs: buildout_bin
 	${PY} utilities/check-configs.py
 
 pagetests: build
@@ -140,12 +142,14 @@
 	${SHHH} bin/sprite-util create-image
 
 jsbuild_lazr: bin/jsbuild
-	# We absolutely do not want to include the lazr.testing module and its
-	# jsTestDriver test harness modifications in the lazr.js and launchpad.js
-	# roll-up files.  They fiddle with built-in functions!  See Bug 482340.
-	${SHHH} bin/jsbuild $(JSFLAGS) -b $(LAZR_BUILT_JS_ROOT) -x testing/ -c $(LAZR_BUILT_JS_ROOT)/yui
+	# We absolutely do not want to include the lazr.testing module and
+	# its jsTestDriver test harness modifications in the lazr.js and
+	# launchpad.js roll-up files.  They fiddle with built-in functions!
+	# See Bug 482340.
+	${SHHH} bin/jsbuild $(JSFLAGS) -b $(LAZR_BUILT_JS_ROOT) -x testing/ \
+	-c $(LAZR_BUILT_JS_ROOT)/yui
 
-jsbuild: jsbuild_lazr bin/jsbuild bin/jssize
+jsbuild: jsbuild_lazr bin/jsbuild bin/jssize buildout_bin
 	${SHHH} bin/jsbuild \
 		$(JSFLAGS) \
 		-n launchpad \
@@ -173,12 +177,12 @@
 	@exit 1
 endif
 
-buildonce_eggs: $(PY)
+buildonce_eggs: buildout_bin
 	find eggs -name '*.pyc' -exec rm {} \;
 
 # The download-cache dependency comes *before* eggs so that developers get the
-# warning before the eggs directory is made.  The target for the eggs directory
-# is only there for deployment convenience.
+# warning before the eggs directory is made.  The target for the eggs
+# directory is only there for deployment convenience.
 # Note that the buildout version must be maintained here and in versions.cfg
 # to make sure that the build does not go over the network.
 bin/buildout: download-cache eggs
@@ -192,19 +196,22 @@
 # and the other bits might run into problems like bug 575037.  This
 # target runs buildout, and then removes everything created except for
 # the eggs.
-build_eggs: $(BUILDOUT_BIN) clean_buildout
+build_eggs: buildout_bin clean_buildout
+
+$(BUILDOUT_BIN): buildout_bin
 
 # This builds bin/py and all the other bin files except bin/buildout.
 # Remove the target before calling buildout to ensure that buildout
 # updates the timestamp.
-$(BUILDOUT_BIN): bin/buildout versions.cfg $(BUILDOUT_CFG) setup.py
+buildout_bin: bin/buildout versions.cfg $(BUILDOUT_CFG) setup.py \
+		$(BUILDOUT_TEMPLATES)
 	$(RM) $@
 	$(SHHH) PYTHONPATH= ./bin/buildout \
                 configuration:instance_name=${LPCONFIG} -c $(BUILDOUT_CFG)
 
 # bin/compile_templates is responsible for building all chameleon templates,
 # of which there is currently one, but of which many more are coming.
-compile: $(PY) $(BZR_VERSION_INFO)
+compile: buildout_bin $(BZR_VERSION_INFO)
 	mkdir -p /var/tmp/vostok-archive
 	${SHHH} $(MAKE) -C sourcecode build PYTHON=${PYTHON} \
 	    LPCONFIG=${LPCONFIG}
@@ -405,7 +412,8 @@
 	# We insert the absolute path to the branch-rewrite script
 	# into the Apache config as we copy the file into position.
 	sed -e 's,%BRANCH_REWRITE%,$(shell pwd)/scripts/branch-rewrite.py,' configs/development/local-launchpad-apache > /etc/apache2/sites-available/local-launchpad
-	cp configs/development/local-vostok-apache /etc/apache2/sites-available/local-vostok
+	cp configs/development/local-vostok-apache \
+		/etc/apache2/sites-available/local-vostok
 	touch /var/tmp/bazaar.launchpad.dev/rewrite.log
 	chown $(SUDO_UID):$(SUDO_GID) /var/tmp/bazaar.launchpad.dev/rewrite.log
 
@@ -430,8 +438,9 @@
 
 lp.sfood:
 	# Generate import dependency graph
-	sfood -i -u -I lib/sqlobject -I lib/schoolbell -I lib/devscripts -I lib/contrib \
-	-I lib/canonical/not-used lib/canonical lib/lp 2>/dev/null | grep -v contrib/ \
+	sfood -i -u -I lib/sqlobject -I lib/schoolbell -I lib/devscripts \
+	-I lib/contrib -I lib/canonical/not-used lib/canonical \
+	lib/lp 2>/dev/null | grep -v contrib/ \
 	| grep -v sqlobject | grep -v BeautifulSoup | grep -v psycopg \
 	| grep -v schoolbell > lp.sfood.tmp
 	mv lp.sfood.tmp lp.sfood
@@ -463,10 +472,10 @@
 		--docformat restructuredtext --verbose-about epytext-summary \
 		$(PYDOCTOR_OPTIONS)
 
-.PHONY: apidoc check tags TAGS zcmldocs realclean clean debug stop\
-	start run ftest_build ftest_inplace test_build test_inplace pagetests\
-	check check_merge \
-	schema default launchpad.pot check_merge_ui pull scan sync_branches\
-	reload-apache hosted_branches check_db_merge check_mailman check_config\
-	jsbuild jsbuild_lazr clean_js clean_buildout buildonce_eggs build_eggs\
-	sprite_css sprite_image css_combine compile check_schema pydoctor
+.PHONY: apidoc buildout_bin check tags TAGS zcmldocs realclean clean debug \
+	stop start run ftest_build ftest_inplace test_build test_inplace \
+	pagetests check check_merge schema default launchpad.pot \
+	check_merge_ui pull scan sync_branches reload-apache hosted_branches \
+	check_db_merge check_mailman check_config jsbuild jsbuild_lazr \
+	clean_js clean_buildout buildonce_eggs build_eggs sprite_css \
+	sprite_image css_combine compile check_schema pydoctor

=== modified file 'cronscripts/publishing/cron.publish-copy-archives'
--- cronscripts/publishing/cron.publish-copy-archives	2010-06-25 14:36:11 +0000
+++ cronscripts/publishing/cron.publish-copy-archives	2010-11-19 10:27:10 +0000
@@ -10,7 +10,6 @@
     exit 1
 fi
 
-set -x
 set -e
 set -u
 
@@ -20,24 +19,23 @@
 
 
 # Informational -- this *MUST* match the database.
-ARCHIVEROOT=/srv/launchpad.net/ubuntu-archive/ubuntu
+ARCHIVEROOT=/srv/launchpad.net/rebuild-test/ubuntu
 DISTSROOT=$ARCHIVEROOT/dists
 OVERRIDEROOT=$ARCHIVEROOT/../ubuntu-overrides
 INDICES=$ARCHIVEROOT/indices
 PRODUCTION_CONFIG=ftpmaster-publish
 
 if [ "$LPCONFIG" = "$PRODUCTION_CONFIG" ]; then
-    GNUPGHOME=/srv/launchpad.net/ubuntu-archive/gnupg-home
+    GNUPGHOME=/srv/launchpad.net/rebuild-test/gnupg-home
 else
     echo GPG keys will come from ~/.gnupg
     # GNUPGHOME does not need to be set, keys can come from ~/.gnupg.
 fi
 
 # Configuration options.
-LAUNCHPADROOT=/srv/launchpad.net/codelines/current
-LOCKFILE=/srv/launchpad.net/ubuntu-archive/cron.daily.lock
+LAUNCHPADROOT=/srv/launchpad.net/production/launchpad
+LOCKFILE=/srv/launchpad.net/rebuild-test/cron.daily.lock
 DISTRONAME=ubuntu
-TRACEFILE=$ARCHIVEROOT/project/trace/$(hostname --fqdn)
 
 # Manipulate the environment.
 export GNUPGHOME
@@ -64,20 +62,5 @@
 # Publish the packages to disk.
 publish-distro.py -v -v --copy-archive -d $DISTRONAME
 
-set +x
-
 echo Removing uncompressed Packages and Sources files
 find ${DISTSROOT} \( -name "Packages" -o -name "Sources" \) -exec rm "{}" \;
-
-# Copy in the indices.
-if [ "$LPCONFIG" = "$PRODUCTION_CONFIG" ]; then
-    echo Copying the indices into place.
-    rm -f $INDICES/override.*
-    cp $OVERRIDEROOT/override.* $INDICES
-fi
-
-# Timestamp our trace file to track when the last archive publisher run took
-# place.
-if [ "$LPCONFIG" = "$PRODUCTION_CONFIG" ]; then
-    date -u > "$TRACEFILE"
-fi

=== modified file 'lib/canonical/buildd/binarypackage.py'
--- lib/canonical/buildd/binarypackage.py	2010-07-13 09:13:41 +0000
+++ lib/canonical/buildd/binarypackage.py	2010-11-19 10:27:10 +0000
@@ -19,9 +19,7 @@
 class BuildLogRegexes:
     """Build log regexes for performing actions based on regexes, and extracting dependencies for auto dep-waits"""
     GIVENBACK = [
-        (" terminated by signal 4"),
         ("^E: There are problems and -y was used without --force-yes"),
-        ("^make.* Illegal instruction"),
         ]
     DEPFAIL = [
         ("(?P<pk>[\-+.\w]+)\(inst [^ ]+ ! >> wanted (?P<v>[\-.+\w:~]+)\)","\g<pk> (>> \g<v>)"),

=== modified file 'lib/canonical/buildd/buildrecipe'
--- lib/canonical/buildd/buildrecipe	2010-09-30 20:22:15 +0000
+++ lib/canonical/buildd/buildrecipe	2010-11-19 10:27:10 +0000
@@ -11,6 +11,7 @@
 import os
 import pwd
 import re
+from resource import RLIMIT_AS, setrlimit
 import socket
 from subprocess import call, Popen, PIPE
 import sys
@@ -206,6 +207,7 @@
 
 
 if __name__ == '__main__':
+    setrlimit(RLIMIT_AS, (1000000000, -1))
     builder = RecipeBuilder(*sys.argv[1:])
     if builder.buildTree() != 0:
         sys.exit(RETCODE_FAILURE_BUILD_TREE)

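The setrlimit call added above caps the recipe build's virtual address space. A self-contained illustration of what that one line does (the limit value mirrors the diff; -1 is the same as resource.RLIM_INFINITY):

    from resource import RLIMIT_AS, RLIM_INFINITY, getrlimit, setrlimit

    # Soft-limit this process's address space to ~1 GB while leaving the
    # hard limit unlimited, so an over-allocating build gets MemoryError
    # (or a failed malloc) instead of taking down the builder.
    setrlimit(RLIMIT_AS, (1000000000, RLIM_INFINITY))
    print(getrlimit(RLIMIT_AS))    # (1000000000, -1) on Linux
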
=== modified file 'lib/canonical/launchpad/doc/db-policy.txt'
--- lib/canonical/launchpad/doc/db-policy.txt	2010-02-22 12:16:02 +0000
+++ lib/canonical/launchpad/doc/db-policy.txt	2010-11-19 10:27:10 +0000
@@ -124,3 +124,29 @@
     >>> IMasterObject(ro_janitor) is writable_janitor
     True
 
+Read-Only Mode
+--------------
+
+During database outages, we run in read-only mode. In this mode, no
+matter what database policy is currently installed, explicit requests
+for a master store fail and the default store is always the slave.
+
+    >>> from canonical.launchpad.tests.readonly import read_only_mode
+    >>> from canonical.launchpad.webapp.dbpolicy import MasterDatabasePolicy
+    >>> from contextlib import nested
+
+    >>> with nested(read_only_mode(), MasterDatabasePolicy()):
+    ...     default_store = IStore(Person)
+    ...     IMasterStore.providedBy(default_store)
+    False
+
+    >>> with nested(read_only_mode(), MasterDatabasePolicy()):
+    ...     slave_store = ISlaveStore(Person)
+    ...     IMasterStore.providedBy(slave_store)
+    False
+
+    >>> with nested(read_only_mode(), MasterDatabasePolicy()):
+    ...     master_store = IMasterStore(Person)
+    Traceback (most recent call last):
+    ...
+    ReadOnlyModeDisallowedStore: ('main', 'master')

=== renamed file 'lib/canonical/launchpad/doc/emailaddress.txt.disabled' => 'lib/canonical/launchpad/doc/emailaddress.txt'
--- lib/canonical/launchpad/doc/emailaddress.txt.disabled	2009-08-13 19:03:36 +0000
+++ lib/canonical/launchpad/doc/emailaddress.txt	2010-11-19 10:27:10 +0000
@@ -1,4 +1,5 @@
-= Email Addresses =
+Email Addresses
+===============
 
 In Launchpad we use email addresses to uniquely identify a person. This is why
 email addresses must be unique.
@@ -22,7 +23,7 @@
 
 Email addresses provide both IEmailAddress and IHasOwner.
 
-    >>> from canonical.launchpad.interfaces.launchpad import IHasOwner
+    >>> from lp.registry.interfaces.role import IHasOwner
     >>> verifyObject(IEmailAddress, email)
     True
     >>> verifyObject(IHasOwner, email)
@@ -66,11 +67,12 @@
     [u'celso.providelo@xxxxxxxxxxxxx', u'colin.watson@xxxxxxxxxxxxxxx',
      u'daniel.silverstone@xxxxxxxxxxxxx', u'edgar@xxxxxxxxxxxxxxxx',
      u'foo.bar@xxxxxxxxxxxxx', u'jeff.waugh@xxxxxxxxxxxxxxx',
-     u'limi@xxxxxxxxx', u'mark@xxxxxxxxxxx', u'steve.alexander@xxxxxxxxxxxxxxx',
-     u'support@xxxxxxxxxx']
-
-
-== Deleting email addresses ==
+     u'limi@xxxxxxxxx', u'mark@xxxxxxxxxxx',
+     u'steve.alexander@xxxxxxxxxxxxxxx', u'support@xxxxxxxxxx']
+
+
+Deleting email addresses
+------------------------
 
 Email addresses may be deleted if they're not a person's preferred one
 or the address of a team's mailing list.

=== modified file 'lib/canonical/launchpad/interfaces/__init__.py'
--- lib/canonical/launchpad/interfaces/__init__.py	2010-11-12 20:58:49 +0000
+++ lib/canonical/launchpad/interfaces/__init__.py	2010-11-19 10:27:10 +0000
@@ -11,9 +11,3 @@
 locations under the 'lp' package.  See the `lp` docstring for more details.
 """
 
-# XXX henninge 2010-11-12: This is needed by the file
-# +inbound-email-config.zcml which resides outside of the LP tree and can
-# only be safely updated at roll-out time. The import can be removed again
-# after the 10.11 roll-out.
-from canonical.launchpad.interfaces.mail import IMailHandler
-

=== modified file 'lib/canonical/launchpad/tests/readonly.py'
--- lib/canonical/launchpad/tests/readonly.py	2010-08-20 20:31:18 +0000
+++ lib/canonical/launchpad/tests/readonly.py	2010-11-19 10:27:10 +0000
@@ -7,15 +7,20 @@
 __metaclass__ = type
 __all__ = [
     'touch_read_only_file',
+    'read_only_mode',
     'remove_read_only_file',
     ]
 
+from contextlib import contextmanager
 import os
 
+from lazr.restful.utils import get_current_browser_request
+
 from canonical.launchpad.readonly import (
     is_read_only,
     read_only_file_exists,
     read_only_file_path,
+    READ_ONLY_MODE_ANNOTATIONS_KEY,
     )
 
 
@@ -37,7 +42,7 @@
 def remove_read_only_file(assert_mode_switch=True):
     """Remove the file named read-only.txt from the root of the tree.
 
-    May also assert that the mode switch actually happened (i.e. not 
+    May also assert that the mode switch actually happened (i.e. not
     is_read_only()). This assertion has to be conditional because some tests
     will use this during the processing of a request, when a mode change can't
     happen (i.e. is_read_only() will still return True during that request's
@@ -48,3 +53,15 @@
         # Assert that the switch succeeded and make sure the mode change is
         # logged.
         assert not is_read_only(), "Switching to read-write failed."
+
+
+@contextmanager
+def read_only_mode(flag=True):
+    request = get_current_browser_request()
+    current = request.annotations[READ_ONLY_MODE_ANNOTATIONS_KEY]
+    request.annotations[READ_ONLY_MODE_ANNOTATIONS_KEY] = flag
+    try:
+        assert is_read_only() == flag, 'Failed to set read-only mode'
+        yield
+    finally:
+        request.annotations[READ_ONLY_MODE_ANNOTATIONS_KEY] = current

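The read_only_mode helper above is the usual save/override/restore contextmanager shape. A stdlib-only sketch of that shape, with a plain dict standing in for the request annotations (all names here are invented for illustration):

    from contextlib import contextmanager

    settings = {'read_only': False}

    @contextmanager
    def override(key, value):
        saved = settings[key]
        settings[key] = value
        try:
            yield
        finally:
            # Restore the previous value even if the body raised.
            settings[key] = saved

    with override('read_only', True):
        assert settings['read_only']
    assert not settings['read_only']
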
=== modified file 'lib/canonical/launchpad/webapp/adapter.py'
--- lib/canonical/launchpad/webapp/adapter.py	2010-11-08 12:52:43 +0000
+++ lib/canonical/launchpad/webapp/adapter.py	2010-11-19 10:27:10 +0000
@@ -60,6 +60,7 @@
     IStoreSelector,
     MAIN_STORE,
     MASTER_FLAVOR,
+    ReadOnlyModeDisallowedStore,
     ReadOnlyModeViolation,
     SLAVE_FLAVOR,
     )
@@ -129,6 +130,7 @@
 
 
 class CommitLogger:
+
     def __init__(self, txn):
         self.txn = txn
 
@@ -261,15 +263,16 @@
 def set_permit_timeout_from_features(enabled):
     """Control request timeouts being obtained from the 'hard_timeout' flag.
 
-    Until we've fully setup a page to render - routed the request to the right
-    object, setup a participation etc, feature flags cannot be completely used;
-    and because doing feature flag lookups will trigger DB access, attempting
-    to do a DB lookup will cause a nested DB lookup (the one being done, and
-    the flags lookup). To resolve all of this, timeouts start as a config file
-    only setting, and are then overridden once the request is ready to execute.
+    Until we've fully setup a page to render - routed the request to the
+    right object, setup a participation etc, feature flags cannot be
+    completely used; and because doing feature flag lookups will trigger
+    DB access, attempting to do a DB lookup will cause a nested DB
+    lookup (the one being done, and the flags lookup). To resolve all of
+    this, timeouts start as a config file only setting, and are then
+    overridden once the request is ready to execute.
 
-    :param enabled: If True permit looking up request timeouts in feature
-        flags.
+    :param enabled: If True permit looking up request timeouts in
+        feature flags.
     """
     _local._permit_feature_timeout = enabled
 
@@ -350,6 +353,7 @@
 
 _main_thread_id = None
 
+
 def break_main_thread_db_access(*ignored):
     """Ensure that Storm connections are not made in the main thread.
 
@@ -390,6 +394,7 @@
 
 class ReadOnlyModeConnection(PostgresConnection):
     """storm.database.Connection for read-only mode Launchpad."""
+
     def execute(self, statement, params=None, noresult=False):
         """See storm.database.Connection."""
         try:
@@ -550,13 +555,14 @@
             # XXX: This code does not belong here - see bug=636804.
             # Robert Collins 20100913.
             OpStats.stats['timeouts'] += 1
-            # XXX bug=636801 Robert Colins 20100914 This is duplicated from the
-            # statement tracer, because the tracers are not arranged in a stack
-            # rather a queue: the done-code in the statement tracer never runs.
+            # XXX bug=636801 Robert Colins 20100914 This is duplicated
+            # from the statement tracer, because the tracers are not
+            # arranged in a stack rather a queue: the done-code in the
+            # statement tracer never runs.
             action = getattr(connection, '_lp_statement_action', None)
             if action is not None:
-                # action may be None if the tracer was installed after the
-                # statement was submitted.
+                # action may be None if the tracer was installed after
+                # the statement was submitted.
                 action.finish()
             info = sys.exc_info()
             transaction.doom()
@@ -666,6 +672,20 @@
     @staticmethod
     def get(name, flavor):
         """See `IStoreSelector`."""
+        if is_read_only():
+            # If we are in read-only mode, override the default to the
+            # slave no matter what the existing policy says (it might
+            # work), and raise an exception if the master was explicitly
+            # requested. Most of the time, this doesn't matter as when
+            # we are in read-only mode we have a suitable database
+            # policy installed. However, code can override the policy so
+            # we still need to catch disallowed requests here.
+            if flavor == DEFAULT_FLAVOR:
+                flavor = SLAVE_FLAVOR
+            elif flavor == MASTER_FLAVOR:
+                raise ReadOnlyModeDisallowedStore(name, flavor)
+            else:
+                pass
         db_policy = StoreSelector.get_current()
         if db_policy is None:
             db_policy = MasterDatabasePolicy(None)

=== modified file 'lib/canonical/launchpad/webapp/dbpolicy.py'
--- lib/canonical/launchpad/webapp/dbpolicy.py	2010-11-08 12:52:43 +0000
+++ lib/canonical/launchpad/webapp/dbpolicy.py	2010-11-19 10:27:10 +0000
@@ -149,6 +149,7 @@
 
 class DatabaseBlockedPolicy(BaseDatabasePolicy):
     """`IDatabasePolicy` that blocks all access to the database."""
+
     def getStore(self, name, flavor):
         """Raises `DisallowedStore`. No Database access is allowed."""
         raise DisallowedStore(name, flavor)
@@ -180,6 +181,7 @@
     This policy is used for Feeds requests and other always-read only request.
     """
     default_flavor = SLAVE_FLAVOR
+
     def getStore(self, name, flavor):
         """See `IDatabasePolicy`."""
         if flavor == MASTER_FLAVOR:
@@ -210,6 +212,7 @@
 
     Selects the DEFAULT_FLAVOR based on the request.
     """
+
     def __init__(self, request):
         # The super constructor is a no-op.
         # pylint: disable-msg=W0231
@@ -364,6 +367,7 @@
 
     Access to all master Stores is blocked.
     """
+
     def getStore(self, name, flavor):
         """See `IDatabasePolicy`.
 
@@ -383,6 +387,7 @@
 
 class WhichDbView(LaunchpadView):
     "A page that reports which database is being used by default."
+
     def render(self):
         store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR)
         dbname = store.execute("SELECT current_database()").get_one()[0]

=== modified file 'lib/lp/app/javascript/tests/test_lp_collapsibles.html'
--- lib/lp/app/javascript/tests/test_lp_collapsibles.html	2010-07-26 13:42:32 +0000
+++ lib/lp/app/javascript/tests/test_lp_collapsibles.html	2010-11-19 10:27:10 +0000
@@ -4,14 +4,14 @@
   <title>Launchpad Collapsibles</title>
 
   <!-- YUI 3.0 Setup -->
-  <script type="text/javascript" src="../../../icing/yui/yui/yui.js"></script>
-  <link rel="stylesheet" href="../../../icing/yui/cssreset/reset.css"/>
-  <link rel="stylesheet" href="../../../icing/yui/cssfonts/fonts.css"/>
-  <link rel="stylesheet" href="../../../icing/yui/cssbase/base.css"/>
-  <link rel="stylesheet" href="../../test.css" />
+  <script type="text/javascript" src="../../../../canonical/launchpad/icing/yui/yui/yui.js"></script>
+  <script type="text/javascript" src="../../../../canonical/launchpad/icing/lazr/build/lazr.js"></script>
+  <link rel="stylesheet" href="../../../../canonical/launchpad/icing/yui/cssreset/reset.css"/>
+  <link rel="stylesheet" href="../../../../canonical/launchpad/icing/yui/cssfonts/fonts.css"/>
+  <link rel="stylesheet" href="../../../../canonical/launchpad/icing/yui/cssbase/base.css"/>
+  <link rel="stylesheet" href="../../../../canonical/launchpad/javascript/test.css" />
 
   <!-- The module under test -->
-  <script type="text/javascript" src="../../../icing/lazr/build/effects/effects.js"></script>
   <script type="text/javascript" src="../lp.js"></script>
 
   <!-- The test suite -->

=== modified file 'lib/lp/app/javascript/tests/test_lp_collapsibles.js'
--- lib/lp/app/javascript/tests/test_lp_collapsibles.js	2010-07-26 13:42:32 +0000
+++ lib/lp/app/javascript/tests/test_lp_collapsibles.js	2010-11-19 10:27:10 +0000
@@ -1,21 +1,21 @@
 /* Copyright (c) 2009, Canonical Ltd. All rights reserved. */
 
 YUI({
-    base: '../../../icing/yui/',
+    base: '../../../../canonical/launchpad/icing/yui/',
     filter: 'raw',
     combine: false
     }).use('test', 'console', 'lp', function(Y) {
 
 var Assert = Y.Assert;  // For easy access to isTrue(), etc.
 
-Y.Test.Runner.add(new Y.Test.Case({
+var suite = new Y.Test.Suite("Collapsibles Tests");
+suite.add(new Y.Test.Case({
     name: "activate_collapsibles",
 
     _should: {
-        error: {
+        fail: {
             test_toggle_collapsible_fails_on_wrapperless_collapsible: true,
             test_toggle_collapsible_fails_on_iconless_collapsible: true,
-            test_activate_collapsibles_handles_no_collapsibles: false
         }
     },
 
@@ -149,17 +149,16 @@
     test_toggle_collapsible_opens_collapsed_collapsible: function() {
         // Calling toggle_collapsible() on a collapsed collapsible will
         // toggle its state to open.
+        Y.lp.activate_collapsibles();
         var collapsible = this.container.one('.collapsible');
-        collapsible.addClass('collapsed');
+        var wrapper_div = collapsible.one('.collapseWrapper');
+        wrapper_div.addClass('lazr-closed');
 
-        Y.lp.activate_collapsibles();
         Y.lp.toggle_collapsible(collapsible);
         this.wait(function() {
-
             // The collapsible's wrapper div will now be open.
             var icon = collapsible.one('img');
-            var wrapper_div = collapsible.one('.collapseWrapper');
-            Assert.isTrue(wrapper_div.hasClass('lazr-open'));
+            Assert.isFalse(wrapper_div.hasClass('lazr-closed'));
             Assert.areNotEqual(
                 -1, icon.get('src').indexOf('/@@/treeExpanded'));
         }, 500);
@@ -321,6 +320,15 @@
     }
 }));
 
+// Lock, stock, and two smoking barrels.
+var handle_complete = function(data) {
+    status_node = Y.Node.create(
+        '<p id="complete">Test status: complete</p>');
+    Y.get('body').appendChild(status_node);
+    };
+Y.Test.Runner.on('complete', handle_complete);
+Y.Test.Runner.add(suite);
+
 var yui_console = new Y.Console({
     newestOnTop: false
 });

=== added directory 'lib/lp/app/windmill'
=== added file 'lib/lp/app/windmill/__init__.py'
=== added file 'lib/lp/app/windmill/testing.py'
--- lib/lp/app/windmill/testing.py	1970-01-01 00:00:00 +0000
+++ lib/lp/app/windmill/testing.py	2010-11-19 10:27:10 +0000
@@ -0,0 +1,21 @@
+# Copyright 2009-2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Launchpad app specific testing infrastructure for Windmill."""
+
+__metaclass__ = type
+__all__ = [
+    'AppWindmillLayer',
+    ]
+
+
+from canonical.testing.layers import BaseWindmillLayer
+
+
+class AppWindmillLayer(BaseWindmillLayer):
+    """Layer for App Windmill tests."""
+
+    @classmethod
+    def setUp(cls):
+        cls.base_url = cls.appserver_root_url()
+        super(AppWindmillLayer, cls).setUp()

=== added directory 'lib/lp/app/windmill/tests'
=== added file 'lib/lp/app/windmill/tests/__init__.py'
=== added file 'lib/lp/app/windmill/tests/test_yuitests.py'
--- lib/lp/app/windmill/tests/test_yuitests.py	1970-01-01 00:00:00 +0000
+++ lib/lp/app/windmill/tests/test_yuitests.py	2010-11-19 10:27:10 +0000
@@ -0,0 +1,24 @@
+# Copyright 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Run YUI.test tests."""
+
+__metaclass__ = type
+__all__ = []
+
+from lp.app.windmill.testing import AppWindmillLayer
+from lp.testing import (
+    build_yui_unittest_suite,
+    YUIUnitTestCase,
+    )
+
+
+class AppYUIUnitTestCase(YUIUnitTestCase):
+
+    layer = AppWindmillLayer
+    suite_name = 'AppYUIUnitTests'
+
+
+def test_suite():
+    app_testing_path = 'lp/app/javascript/tests'
+    return build_yui_unittest_suite(app_testing_path, AppYUIUnitTestCase)

=== modified file 'lib/lp/archivepublisher/domination.py'
--- lib/lp/archivepublisher/domination.py	2010-10-03 15:30:06 +0000
+++ lib/lp/archivepublisher/domination.py	2010-11-19 10:27:10 +0000
@@ -58,19 +58,24 @@
 import operator
 
 import apt_pkg
+from storm.expr import And, Count, Select
 
 from canonical.database.constants import UTC_NOW
 from canonical.database.sqlbase import (
     clear_current_connection_cache,
-    cursor,
     flush_database_updates,
     sqlvalues,
     )
+from canonical.launchpad.interfaces.lpstorm import IMasterStore
 from lp.archivepublisher import ELIGIBLE_DOMINATION_STATES
+from lp.registry.model.sourcepackagename import SourcePackageName
 from lp.soyuz.enums import (
     BinaryPackageFormat,
     PackagePublishingStatus,
     )
+from lp.soyuz.model.binarypackagename import BinaryPackageName
+from lp.soyuz.model.binarypackagerelease import BinaryPackageRelease
+from lp.soyuz.model.sourcepackagerelease import SourcePackageRelease
 
 
 def clear_cache():
@@ -287,60 +292,67 @@
             self.debug("Performing domination across %s/%s (%s)" % (
                 dr.name, pocket.title, distroarchseries.architecturetag))
 
-            # Here we go behind SQLObject's back to generate an assistance
-            # table which will seriously improve the performance of this
-            # part of the publisher.
-            # XXX: dsilvers 2006-02-04: It would be nice to not have to do
-            # this. Most of this methodology is stolen from person.py
-            # XXX: malcc 2006-08-03: This should go away when we shift to
-            # doing this one package at a time.
-            flush_database_updates()
-            cur = cursor()
-            cur.execute("""SELECT bpn.id AS name, count(bpn.id) AS count INTO
-                temporary table PubDomHelper FROM BinaryPackageRelease bpr,
-                BinaryPackageName bpn, BinaryPackagePublishingHistory
-                sbpph WHERE bpr.binarypackagename = bpn.id AND
-                sbpph.binarypackagerelease = bpr.id AND
-                sbpph.distroarchseries = %s AND sbpph.archive = %s AND
-                sbpph.status = %s AND sbpph.pocket = %s
-                GROUP BY bpn.id""" % sqlvalues(
-                distroarchseries, self.archive,
-                PackagePublishingStatus.PUBLISHED, pocket))
-
-            binaries = BinaryPackagePublishingHistory.select(
-                """
-                binarypackagepublishinghistory.distroarchseries = %s
-                AND binarypackagepublishinghistory.archive = %s
-                AND binarypackagepublishinghistory.pocket = %s
-                AND binarypackagepublishinghistory.status = %s AND
-                binarypackagepublishinghistory.binarypackagerelease =
-                    binarypackagerelease.id
-                AND binarypackagerelease.binpackageformat != %s
-                AND binarypackagerelease.binarypackagename IN (
-                    SELECT name FROM PubDomHelper WHERE count > 1)"""
-                % sqlvalues(distroarchseries, self.archive,
-                            pocket, PackagePublishingStatus.PUBLISHED,
-                            BinaryPackageFormat.DDEB),
-                clauseTables=['BinaryPackageRelease'])
-
+            bpph_location_clauses = And(
+                BinaryPackagePublishingHistory.status ==
+                    PackagePublishingStatus.PUBLISHED,
+                BinaryPackagePublishingHistory.distroarchseries ==
+                    distroarchseries,
+                BinaryPackagePublishingHistory.archive == self.archive,
+                BinaryPackagePublishingHistory.pocket == pocket,
+                )
+            candidate_binary_names = Select(
+                BinaryPackageName.id,
+                And(
+                    BinaryPackageRelease.binarypackagenameID ==
+                        BinaryPackageName.id,
+                    BinaryPackagePublishingHistory.binarypackagereleaseID ==
+                        BinaryPackageRelease.id,
+                    bpph_location_clauses,
+                ),
+                group_by=BinaryPackageName.id,
+                having=Count(BinaryPackagePublishingHistory.id) > 1)
+            binaries = IMasterStore(BinaryPackagePublishingHistory).find(
+                BinaryPackagePublishingHistory,
+                BinaryPackageRelease.id ==
+                    BinaryPackagePublishingHistory.binarypackagereleaseID,
+                BinaryPackageRelease.binarypackagenameID.is_in(
+                    candidate_binary_names),
+                BinaryPackageRelease.binpackageformat !=
+                    BinaryPackageFormat.DDEB,
+                bpph_location_clauses)
             self.debug("Dominating binaries...")
             self._dominatePublications(self._sortPackages(binaries, False))
             if do_clear_cache:
                 self.debug("Flushing SQLObject cache.")
                 clear_cache()
 
-            flush_database_updates()
-            cur.execute("DROP TABLE PubDomHelper")
-
-        if do_clear_cache:
-            self.debug("Flushing SQLObject cache.")
-            clear_cache()
-
         self.debug("Performing domination across %s/%s (Source)" %
                    (dr.name, pocket.title))
-        sources = SourcePackagePublishingHistory.selectBy(
-            distroseries=dr, archive=self.archive, pocket=pocket,
-            status=PackagePublishingStatus.PUBLISHED)
+        spph_location_clauses = And(
+            SourcePackagePublishingHistory.status ==
+                PackagePublishingStatus.PUBLISHED,
+            SourcePackagePublishingHistory.distroseries == dr,
+            SourcePackagePublishingHistory.archive == self.archive,
+            SourcePackagePublishingHistory.pocket == pocket,
+            )
+        candidate_source_names = Select(
+            SourcePackageName.id,
+            And(
+                SourcePackageRelease.sourcepackagenameID ==
+                    SourcePackageName.id,
+                SourcePackagePublishingHistory.sourcepackagereleaseID ==
+                    SourcePackageRelease.id,
+                spph_location_clauses,
+            ),
+            group_by=SourcePackageName.id,
+            having=Count(SourcePackagePublishingHistory.id) > 1)
+        sources = IMasterStore(SourcePackagePublishingHistory).find(
+            SourcePackagePublishingHistory,
+            SourcePackageRelease.id ==
+                SourcePackagePublishingHistory.sourcepackagereleaseID,
+            SourcePackageRelease.sourcepackagenameID.is_in(
+                candidate_source_names),
+            spph_location_clauses)
         self.debug("Dominating sources...")
         self._dominatePublications(self._sortPackages(sources))
         flush_database_updates()

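The rewrite above replaces the PubDomHelper temporary table with a Storm subquery: a Select with group_by/having picks out the names that have more than one published row, and is_in() restricts the outer find() to those candidates. A self-contained SQLite sketch of the same pattern (the Pub table and its data are invented for illustration):

    from storm.expr import Count, Select
    from storm.locals import Int, Store, Unicode, create_database

    class Pub(object):
        __storm_table__ = 'pub'
        id = Int(primary=True)
        name = Unicode()

    store = Store(create_database('sqlite:'))
    store.execute('CREATE TABLE pub (id INTEGER PRIMARY KEY, name TEXT)')
    for name in [u'a', u'a', u'b']:
        pub = Pub()
        pub.name = name
        store.add(pub)
    store.flush()

    # Names with more than one row, expressed as a subquery...
    duplicated_names = Select(
        Pub.name, group_by=Pub.name, having=Count(Pub.id) > 1)
    # ...then fetch the rows whose name is among those candidates.
    rows = store.find(Pub, Pub.name.is_in(duplicated_names))
    print(sorted(pub.id for pub in rows))    # [1, 2]
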
=== modified file 'lib/lp/bugs/model/bugtask.py'
--- lib/lp/bugs/model/bugtask.py	2010-11-15 16:25:05 +0000
+++ lib/lp/bugs/model/bugtask.py	2010-11-19 10:27:10 +0000
@@ -35,11 +35,12 @@
 from storm.expr import (
     Alias,
     And,
-    AutoTables,
     Desc,
+    In,
     Join,
     LeftJoin,
     Or,
+    Select,
     SQL,
     )
 from storm.store import (
@@ -160,6 +161,7 @@
     )
 from lp.registry.model.pillar import pillar_sort_key
 from lp.registry.model.sourcepackagename import SourcePackageName
+from lp.registry.model.structuralsubscription import StructuralSubscription
 from lp.services.propertycache import get_property_cache
 from lp.soyuz.enums import PackagePublishingStatus
 from lp.soyuz.model.publishing import SourcePackagePublishingHistory
@@ -1606,7 +1608,9 @@
         from lp.bugs.model.bug import Bug
         extra_clauses = ['Bug.id = BugTask.bug']
         clauseTables = [BugTask, Bug]
+        join_tables = []
         decorators = []
+        has_duplicate_results = False
 
         # These arguments can be processed in a loop without any other
         # special handling.
@@ -1662,7 +1666,7 @@
             extra_clauses.append("BugTask.milestone %s" % where_cond)
 
         if params.project:
-            # Circular.
+            # Prevent circular import problems.
             from lp.registry.model.product import Product
             clauseTables.append(Product)
             extra_clauses.append("BugTask.product = Product.id")
@@ -1713,47 +1717,54 @@
                     sqlvalues(personid=params.subscriber.id))
 
         if params.structural_subscriber is not None:
-            structural_subscriber_clause = ("""BugTask.id IN (
-                SELECT BugTask.id FROM BugTask, StructuralSubscription
-                WHERE BugTask.product = StructuralSubscription.product
-                  AND StructuralSubscription.subscriber = %(personid)s
-                UNION ALL
-                SELECT BugTask.id FROM BugTask, StructuralSubscription
-                WHERE
-                  BugTask.distribution = StructuralSubscription.distribution
-                  AND BugTask.sourcepackagename =
-                      StructuralSubscription.sourcepackagename
-                  AND StructuralSubscription.subscriber = %(personid)s
-                UNION ALL
-                SELECT BugTask.id FROM BugTask, StructuralSubscription
-                WHERE
-                  BugTask.distroseries = StructuralSubscription.distroseries
-                  AND StructuralSubscription.subscriber = %(personid)s
-                UNION ALL
-                SELECT BugTask.id FROM BugTask, StructuralSubscription
-                WHERE
-                  BugTask.milestone = StructuralSubscription.milestone
-                  AND StructuralSubscription.subscriber = %(personid)s
-                UNION ALL
-                SELECT BugTask.id FROM BugTask, StructuralSubscription
-                WHERE
-                  BugTask.productseries = StructuralSubscription.productseries
-                  AND StructuralSubscription.subscriber = %(personid)s
-                UNION ALL
-                SELECT BugTask.id
-                FROM BugTask, StructuralSubscription, Product
-                WHERE
-                  BugTask.product = Product.id
-                  AND Product.project = StructuralSubscription.project
-                  AND StructuralSubscription.subscriber = %(personid)s
-                UNION ALL
-                SELECT BugTask.id FROM BugTask, StructuralSubscription
-                WHERE
-                  BugTask.distribution = StructuralSubscription.distribution
-                  AND StructuralSubscription.sourcepackagename is NULL
-                  AND StructuralSubscription.subscriber = %(personid)s)""" %
-                sqlvalues(personid=params.structural_subscriber))
-            extra_clauses.append(structural_subscriber_clause)
+            ssub_match_product = (
+                BugTask.productID ==
+                StructuralSubscription.productID)
+            ssub_match_productseries = (
+                BugTask.productseriesID ==
+                StructuralSubscription.productseriesID)
+            # Prevent circular import problems.
+            from lp.registry.model.product import Product
+            ssub_match_project = And(
+                Product.projectID ==
+                StructuralSubscription.projectID,
+                BugTask.product == Product.id)
+            ssub_match_distribution = (
+                BugTask.distributionID ==
+                StructuralSubscription.distributionID)
+            ssub_match_sourcepackagename = (
+                BugTask.sourcepackagenameID ==
+                StructuralSubscription.sourcepackagenameID)
+            ssub_match_null_sourcepackagename = (
+                StructuralSubscription.sourcepackagename == None)
+            ssub_match_distribution_with_optional_package = And(
+                ssub_match_distribution, Or(
+                    ssub_match_sourcepackagename,
+                    ssub_match_null_sourcepackagename))
+            ssub_match_distribution_series = (
+                BugTask.distroseriesID ==
+                StructuralSubscription.distroseriesID)
+            ssub_match_milestone = (
+                BugTask.milestoneID ==
+                StructuralSubscription.milestoneID)
+
+            join_clause = Or(
+                ssub_match_product,
+                ssub_match_productseries,
+                ssub_match_project,
+                ssub_match_distribution_with_optional_package,
+                ssub_match_distribution_series,
+                ssub_match_milestone)
+
+            join_tables.append(
+                (Product, LeftJoin(Product, BugTask.productID == Product.id)))
+            join_tables.append(
+                (StructuralSubscription,
+                 Join(StructuralSubscription, join_clause)))
+            extra_clauses.append(
+                'StructuralSubscription.subscriber = %s'
+                % sqlvalues(params.structural_subscriber))
+            has_duplicate_results = True
 
         if params.component:
             clauseTables += [SourcePackagePublishingHistory,
@@ -1836,7 +1847,7 @@
         if params.bug_commenter:
             bug_commenter_clause = """
             BugTask.id IN (
-                SELECT BugTask.id FROM BugTask, BugMessage, Message
+                SELECT DISTINCT BugTask.id FROM BugTask, BugMessage, Message
                 WHERE Message.owner = %(bug_commenter)s
                     AND Message.id = BugMessage.message
                     AND BugTask.bug = BugMessage.bug
@@ -1928,7 +1939,9 @@
                 for decor in decorators:
                     obj = decor(obj)
                 return obj
-        return query, clauseTables, orderby_arg, decorator
+        return (
+            query, clauseTables, orderby_arg, decorator, join_tables,
+            has_duplicate_results)
 
     def _buildUpstreamClause(self, params):
         """Return an clause for returning upstream data if the data exists.
@@ -2156,101 +2169,137 @@
             ', '.join(tables), ' AND '.join(clauses))
         return clause
 
-    def search(self, params, *args, **kwargs):
-        """See `IBugTaskSet`.
-
-        :param _noprejoins: Private internal parameter to BugTaskSet which
-            disables all use of prejoins : consolidated from code paths that
-            claim they were inefficient and unwanted.
-        """
-        # Circular.
-        from lp.registry.model.product import Product
-        from lp.bugs.model.bug import Bug
-        _noprejoins = kwargs.get('_noprejoins', False)
+    def buildOrigin(self, join_tables, prejoin_tables, clauseTables):
+        """Build the parameter list for Store.using().
+
+        :param join_tables: A sequence of tables that should be joined
+            as returned by buildQuery(). Each element has the form
+            (table, join), where table is the table to join and join
+            is a Storm Join or LeftJoin instance.
+        :param prejoin_tables: A sequence of tables that should additionally
+            be joined. Each element has the form (table, join),
+            where table is the table to join and join is a Storm Join
+            or LeftJoin instance.
+        :param clauseTables: A sequence of tables that should appear in
+            the FROM clause of a query. The join condition is defined in
+            the WHERE clause.
+
+        Tables may appear simultaneously in join_tables, prejoin_tables
+        and in clauseTables. This method ensures that each table
+        appears exactly once in the returned sequence.
+        """
+        origin = [BugTask]
+        already_joined = set(origin)
+        for table, join in join_tables:
+            origin.append(join)
+            already_joined.add(table)
+        for table, join in prejoin_tables:
+            if table not in already_joined:
+                origin.append(join)
+                already_joined.add(table)
+        for table in clauseTables:
+            if table not in already_joined:
+                origin.append(table)
+        return origin
+
+    def _search(self, resultrow, prejoins, params, *args, **kw):
+        """Return a Storm result set for the given search parameters.
+
+        :param resultrow: The type of data returned by the query.
+        :param prejoins: A sequence of Storm SQL row instances which are
+            pre-joined.
+        :param params: A BugTaskSearchParams instance.
+        :param args: optional additional BugTaskSearchParams instances,
+        """
         store = IStore(BugTask)
-        query, clauseTables, orderby, bugtask_decorator = self.buildQuery(
-            params)
+        [query, clauseTables, orderby, bugtask_decorator, join_tables,
+        has_duplicate_results] = self.buildQuery(params)
         if len(args) == 0:
-            if _noprejoins:
-                resultset = store.find(BugTask,
-                    AutoTables(SQL("1=1"), clauseTables),
-                    query)
+            if has_duplicate_results:
+                origin = self.buildOrigin(join_tables, [], clauseTables)
+                outer_origin = self.buildOrigin([], prejoins, [])
+                subquery = Select(BugTask.id, where=SQL(query), tables=origin)
+                resultset = store.using(*outer_origin).find(
+                    resultrow, In(BugTask.id, subquery))
+            else:
+                origin = self.buildOrigin(join_tables, prejoins, clauseTables)
+                resultset = store.using(*origin).find(resultrow, query)
+            if prejoins:
+                decorator = lambda row: bugtask_decorator(row[0])
+            else:
                 decorator = bugtask_decorator
-            else:
-                tables = clauseTables + [Product, SourcePackageName]
-                origin = [
-                    BugTask,
-                    LeftJoin(Bug, BugTask.bug == Bug.id),
-                    LeftJoin(Product, BugTask.product == Product.id),
-                    LeftJoin(
-                        SourcePackageName,
-                        BugTask.sourcepackagename == SourcePackageName.id),
-                    ]
-                # NB: these may work with AutoTables, but its hard to tell,
-                # this way is known to work.
-                if BugNomination in tables:
-                    # The relation is already in query.
-                    origin.append(BugNomination)
-                if BugSubscription in tables:
-                    # The relation is already in query.
-                    origin.append(BugSubscription)
-                if SourcePackageRelease in tables:
-                    origin.append(SourcePackageRelease)
-                if SourcePackagePublishingHistory in tables:
-                    origin.append(SourcePackagePublishingHistory)
-                resultset = store.using(*origin).find(
-                    (BugTask, Product, SourcePackageName, Bug),
-                    AutoTables(SQL("1=1"), tables),
-                    query)
-                decorator=lambda row: bugtask_decorator(row[0])
+
             resultset.order_by(orderby)
             return DecoratedResultSet(resultset, result_decorator=decorator)
 
         bugtask_fti = SQL('BugTask.fti')
-        result = store.find((BugTask, bugtask_fti), query,
-                            AutoTables(SQL("1=1"), clauseTables))
+        inner_resultrow = (BugTask, bugtask_fti)
+        origin = self.buildOrigin(join_tables, [], clauseTables)
+        resultset = store.using(*origin).find(inner_resultrow, query)
+
         decorators = [bugtask_decorator]
         for arg in args:
-            query, clauseTables, dummy, decorator = self.buildQuery(arg)
-            result = result.union(
-                store.find((BugTask, bugtask_fti), query,
-                           AutoTables(SQL("1=1"), clauseTables)))
+            [query, clauseTables, ignore, decorator, join_tables,
+             has_duplicate_results] = self.buildQuery(arg)
+            origin = self.buildOrigin(join_tables, [], clauseTables)
+            next_result = store.using(*origin).find(inner_resultrow, query)
+            resultset = resultset.union(next_result)
             # NB: assumes the decorators are all compatible.
             # This may need revisiting if e.g. searches on behalf of different
             # users are combined.
             decorators.append(decorator)
 
-        def decorator(row):
+        def prejoin_decorator(row):
             bugtask = row[0]
             for decorator in decorators:
                 bugtask = decorator(bugtask)
             return bugtask
 
-        # Build up the joins.
-        # TODO: implement _noprejoins for this code path: as of 20100818 it
-        # has been silently disabled because clients of the API were setting
-        # prejoins=[] which had no effect; this TODO simply notes the reality
-        # already existing when it was added.
-        joins = Alias(result._get_select(), "BugTask")
-        joins = Join(joins, Bug, BugTask.bug == Bug.id)
-        joins = LeftJoin(joins, Product, BugTask.product == Product.id)
-        joins = LeftJoin(joins, SourcePackageName,
-                         BugTask.sourcepackagename == SourcePackageName.id)
-
-        result = store.using(joins).find(
-            (BugTask, Bug, Product, SourcePackageName))
+        def simple_decorator(bugtask):
+            for decorator in decorators:
+                bugtask = decorator(bugtask)
+            return bugtask
+
+        origin = [Alias(resultset._get_select(), "BugTask")]
+        if prejoins:
+            origin += [join for table, join in prejoins]
+            decorator = prejoin_decorator
+        else:
+            decorator = simple_decorator
+
+        result = store.using(*origin).find(resultrow)
         result.order_by(orderby)
         return DecoratedResultSet(result, result_decorator=decorator)
 
+    def search(self, params, *args, **kwargs):
+        """See `IBugTaskSet`.
+
+        :param _noprejoins: Private internal parameter to BugTaskSet which
+            disables all use of prejoins : consolidated from code paths that
+            claim they were inefficient and unwanted.
+        """
+        # Prevent circular import problems.
+        from lp.registry.model.product import Product
+        from lp.bugs.model.bug import Bug
+        _noprejoins = kwargs.get('_noprejoins', False)
+        if _noprejoins:
+            prejoins = []
+            resultrow = BugTask
+        else:
+            prejoins = [
+                (Bug, LeftJoin(Bug, BugTask.bug == Bug.id)),
+                (Product, LeftJoin(Product, BugTask.product == Product.id)),
+                (SourcePackageName,
+                 LeftJoin(
+                     SourcePackageName,
+                     BugTask.sourcepackagename == SourcePackageName.id)),
+                ]
+            resultrow = (BugTask, Bug, Product, SourcePackageName, )
+        return self._search(resultrow, prejoins, params, *args)
+
     def searchBugIds(self, params):
         """See `IBugTaskSet`."""
-        query, clauseTables, orderby, decorator = self.buildQuery(
-            params)
-        store = IStore(BugTask)
-        resultset = store.find(BugTask.bugID,
-            AutoTables(SQL("1=1"), clauseTables), query)
-        resultset.order_by(orderby)
-        return resultset
+        return self._search(BugTask.bugID, [], params).result_set
 
     def getAssignedMilestonesFromSearch(self, search_results):
         """See `IBugTaskSet`."""
@@ -2854,8 +2903,8 @@
 
         if recipients is not None:
             # We need to process subscriptions, so pull all the
-            # subscribes into the cache, then update recipients with
-            # the subscriptions.
+            # subscribers into the cache, then update recipients
+            # with the subscriptions.
             subscribers = list(subscribers)
             for subscription in subscriptions:
                 recipients.addStructuralSubscriber(

=== modified file 'lib/lp/bugs/tests/test_bugtask_search.py'
--- lib/lp/bugs/tests/test_bugtask_search.py	2010-11-09 06:55:07 +0000
+++ lib/lp/bugs/tests/test_bugtask_search.py	2010-11-19 10:27:10 +0000
@@ -504,6 +504,23 @@
         self.assertSearchFinds(params, self.bugtasks[:1])
 
 
+class ProjectGroupAndDistributionTests:
+    """Tests which are useful for project groups and distributions."""
+
+    def setUpStructuralSubscriptions(self):
+        # Subscribe a user to the search target of this test and to
+        # another target.
+        raise NotImplementedError
+
+    def test_unique_results_for_multiple_structural_subscriptions(self):
+        # Searching for a subscriber who is more than once subscribed to a
+        # bug task returns this bug task only once.
+        subscriber = self.setUpStructuralSubscriptions()
+        params = self.getBugTaskSearchParams(
+            user=None, structural_subscriber=subscriber)
+        self.assertSearchFinds(params, self.bugtasks)
+
+
 class BugTargetTestBase:
     """A base class for the bug target mixin classes."""
 
@@ -625,7 +642,8 @@
             bugtask, self.searchtarget.product)
 
 
-class ProjectGroupTarget(BugTargetTestBase, BugTargetWithBugSuperVisor):
+class ProjectGroupTarget(BugTargetTestBase, BugTargetWithBugSuperVisor,
+                         ProjectGroupAndDistributionTests):
     """Use a project group as the bug target."""
 
     def setUp(self):
@@ -695,6 +713,15 @@
             'No bug task found for a product that is not the target of '
             'the main test bugtask.')
 
+    def setUpStructuralSubscriptions(self):
+        # See `ProjectGroupAndDistributionTests`.
+        subscriber = self.factory.makePerson()
+        self.subscribeToTarget(subscriber)
+        with person_logged_in(subscriber):
+            self.bugtasks[0].target.addSubscription(
+                subscriber, subscribed_by=subscriber)
+        return subscriber
+
 
 class MilestoneTarget(BugTargetTestBase):
     """Use a milestone as the bug target."""
@@ -728,7 +755,8 @@
 
 
 class DistributionTarget(BugTargetTestBase, ProductAndDistributionTests,
-                         BugTargetWithBugSuperVisor):
+                         BugTargetWithBugSuperVisor,
+                         ProjectGroupAndDistributionTests):
     """Use a distribution as the bug target."""
 
     def setUp(self):
@@ -750,6 +778,18 @@
         """See `ProductAndDistributionTests`."""
         return self.factory.makeDistroSeries(distribution=self.searchtarget)
 
+    def setUpStructuralSubscriptions(self):
+        # See `ProjectGroupAndDistributionTests`.
+        subscriber = self.factory.makePerson()
+        sourcepackage = self.factory.makeDistributionSourcePackage(
+            distribution=self.searchtarget)
+        self.bugtasks.append(self.factory.makeBugTask(target=sourcepackage))
+        self.subscribeToTarget(subscriber)
+        with person_logged_in(subscriber):
+            sourcepackage.addSubscription(
+                subscriber, subscribed_by=subscriber)
+        return subscriber
+
 
 class DistroseriesTarget(BugTargetTestBase):
     """Use a distro series as the bug target."""
@@ -835,7 +875,30 @@
     )
 
 
-class PreloadBugtaskTargets:
+class MultipleParams:
+    """A mixin class for tests with more than one search parameter object.
+
+    BugTaskSet.search() can be called with more than one
+    BugTaskSearchParams instances, while BugTaskSet.searchBugIds()
+    accepts exactly one instance.
+    """
+
+    def test_two_param_objects(self):
+        # We can pass more than one BugTaskSearchParams instance to
+        # BugTaskSet.search().
+        params1 = self.getBugTaskSearchParams(
+            user=None, status=BugTaskStatus.FIXCOMMITTED)
+        subscriber = self.factory.makePerson()
+        self.subscribeToTarget(subscriber)
+        params2 = self.getBugTaskSearchParams(
+            user=None, status=BugTaskStatus.NEW,
+            structural_subscriber=subscriber)
+        search_result = self.runSearch(params1, params2)
+        expected = self.resultValuesForBugtasks(self.bugtasks[1:])
+        self.assertEqual(expected, search_result)
+
+
+class PreloadBugtaskTargets(MultipleParams):
     """Preload bug targets during a BugTaskSet.search() query."""
 
     def setUp(self):
@@ -849,7 +912,7 @@
         return expected_bugtasks
 
 
-class NoPreloadBugtaskTargets:
+class NoPreloadBugtaskTargets(MultipleParams):
     """Do not preload bug targets during a BugTaskSet.search() query."""
 
     def setUp(self):

=== modified file 'lib/lp/buildmaster/manager.py'
--- lib/lp/buildmaster/manager.py	2010-10-28 15:04:15 +0000
+++ lib/lp/buildmaster/manager.py	2010-11-19 10:27:10 +0000
@@ -151,10 +151,12 @@
         if failure.check(
             BuildSlaveFailure, CannotBuild, BuildBehaviorMismatch,
             CannotResumeHost, BuildDaemonError, CannotFetchFile):
-            self.logger.info("Scanning failed with: %s" % error_message)
+            self.logger.info("Scanning %s failed with: %s" % (
+                self.builder_name, error_message))
         else:
-            self.logger.info("Scanning failed with: %s\n%s" %
-                (failure.getErrorMessage(), failure.getTraceback()))
+            self.logger.info("Scanning %s failed with: %s\n%s" % (
+                self.builder_name, failure.getErrorMessage(),
+                failure.getTraceback()))
 
         # Decide if we need to terminate the job or fail the
         # builder.

=== modified file 'lib/lp/buildmaster/model/builder.py'
--- lib/lp/buildmaster/model/builder.py	2010-11-10 13:06:05 +0000
+++ lib/lp/buildmaster/model/builder.py	2010-11-19 10:27:10 +0000
@@ -8,6 +8,7 @@
 __all__ = [
     'Builder',
     'BuilderSet',
+    'ProxyWithConnectionTimeout',
     'rescueBuilderIfLost',
     'updateBuilderStatus',
     ]
@@ -99,6 +100,41 @@
     noisy = False
 
 
+class ProxyWithConnectionTimeout(xmlrpc.Proxy):
+    """Extend Twisted's Proxy to provide a configurable connection timeout."""
+
+    def __init__(self, url, user=None, password=None, allowNone=False,
+                 useDateTime=False, timeout=None):
+        xmlrpc.Proxy.__init__(
+            self, url, user, password, allowNone, useDateTime)
+        if timeout is None:
+            self.timeout = config.builddmaster.socket_timeout
+        else:
+            self.timeout = timeout
+
+    def callRemote(self, method, *args):
+        """Basically a carbon copy of the parent but passes the timeout
+        to connectTCP."""
+
+        def cancel(d):
+            factory.deferred = None
+            connector.disconnect()
+        factory = self.queryFactory(
+            self.path, self.host, method, self.user,
+            self.password, self.allowNone, args, cancel, self.useDateTime)
+        if self.secure:
+            from twisted.internet import ssl
+            connector = default_reactor.connectSSL(
+                self.host, self.port or 443, factory,
+                ssl.ClientContextFactory(),
+                timeout=self.timeout)
+        else:
+            connector = default_reactor.connectTCP(
+                self.host, self.port or 80, factory,
+                timeout=self.timeout)
+        return factory.deferred
+
+
 class BuilderSlave(object):
     """Add in a few useful methods for the XMLRPC slave.
 
@@ -141,7 +177,7 @@
         """
         rpc_url = urlappend(builder_url.encode('utf-8'), 'rpc')
         if proxy is None:
-            server_proxy = xmlrpc.Proxy(rpc_url, allowNone=True)
+            server_proxy = ProxyWithConnectionTimeout(rpc_url, allowNone=True)
             server_proxy.queryFactory = QuietQueryFactory
         else:
             server_proxy = proxy
@@ -213,7 +249,7 @@
         :param libraryfilealias: An `ILibraryFileAlias`.
         """
         url = libraryfilealias.http_url
-        logger.debug(
+        logger.info(
             "Asking builder on %s to ensure it has file %s (%s, %s)" % (
                 self._file_cache_url, libraryfilealias.filename, url,
                 libraryfilealias.content.sha1))
@@ -432,7 +468,7 @@
             return defer.fail(CannotResumeHost('Undefined vm_host.'))
 
         logger = self._getSlaveScannerLogger()
-        logger.debug("Resuming %s (%s)" % (self.name, self.url))
+        logger.info("Resuming %s (%s)" % (self.name, self.url))
 
         d = self.slave.resume()
         def got_resume_ok((stdout, stderr, returncode)):
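
For illustration, a minimal sketch of constructing the new proxy
directly; the URL and timeout value here are made up:

    from lp.buildmaster.model.builder import ProxyWithConnectionTimeout

    # With timeout=None the proxy falls back to
    # config.builddmaster.socket_timeout.
    proxy = ProxyWithConnectionTimeout(
        'http://builder.example.com:8221/rpc', allowNone=True,
        timeout=60)
    d = proxy.callRemote('echo', 'ping')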

=== modified file 'lib/lp/buildmaster/tests/test_builder.py'
--- lib/lp/buildmaster/tests/test_builder.py	2010-11-10 22:40:05 +0000
+++ lib/lp/buildmaster/tests/test_builder.py	2010-11-19 10:27:10 +0000
@@ -43,6 +43,10 @@
     )
 from lp.buildmaster.interfaces.buildqueue import IBuildQueueSet
 from lp.buildmaster.interfaces.builder import CannotResumeHost
+from lp.buildmaster.model.builder import (
+    BuilderSlave,
+    ProxyWithConnectionTimeout,
+    )
 from lp.buildmaster.model.buildfarmjobbehavior import IdleBuildBehavior
 from lp.buildmaster.model.buildqueue import BuildQueue
 from lp.buildmaster.tests.mock_slaves import (
@@ -1059,6 +1063,56 @@
             self.slave.build(None, None, None, None, None))
 
 
+class TestSlaveConnectionTimeouts(TrialTestCase):
+    """Test that the default 30 second connection timeout can be
+    overridden by configuration."""
+
+    layer = TwistedLayer
+
+    def setUp(self):
+        super(TestSlaveConnectionTimeouts, self).setUp()
+        self.slave_helper = SlaveTestHelpers()
+        self.slave_helper.setUp()
+        self.addCleanup(self.slave_helper.cleanUp)
+        self.clock = Clock()
+        self.proxy = ProxyWithConnectionTimeout("fake_url")
+        self.slave = self.slave_helper.getClientSlave(
+            reactor=self.clock, proxy=self.proxy)
+
+    def test_connection_timeout(self):
+        # Twisted's default 30 second timeout must not fire; only the
+        # configured socket_timeout should cause a timeout.
+        timeout_config = """
+        [builddmaster]
+        socket_timeout: 180
+        """
+        config.push('timeout', timeout_config)
+        self.addCleanup(config.pop, 'timeout')
+
+        d = self.slave.echo()
+        # Advance past the 30 second timeout.  The real reactor will
+        # never call connectTCP() since we're not spinning it up.  This
+        # avoids "connection refused" errors and simulates an
+        # environment where the endpoint doesn't respond.
+        self.clock.advance(31)
+        self.assertFalse(d.called)
+
+        # Now advance past the real socket timeout and expect a
+        # Failure.
+
+        def got_timeout(failure):
+            self.assertIsInstance(failure.value, CancelledError)
+
+        d.addBoth(got_timeout)
+        self.clock.advance(config.builddmaster.socket_timeout + 1)
+        self.assertTrue(d.called)
+
+    def test_BuilderSlave_uses_ProxyWithConnectionTimeout(self):
+        # Make sure that BuilderSlaves use the custom proxy class.
+        slave = BuilderSlave.makeBuilderSlave("url", "host")
+        self.assertIsInstance(slave._server, ProxyWithConnectionTimeout)
+
+
 class TestSlaveWithLibrarian(TrialTestCase):
     """Tests that need more of Launchpad to run."""
 

=== modified file 'lib/lp/code/browser/branchlisting.py'
--- lib/lp/code/browser/branchlisting.py	2010-11-09 07:13:41 +0000
+++ lib/lp/code/browser/branchlisting.py	2010-11-19 10:27:10 +0000
@@ -94,6 +94,7 @@
     PersonActiveReviewsView,
     PersonProductActiveReviewsView,
     )
+from lp.code.browser.branchmergequeuelisting import HasMergeQueuesMenuMixin
 from lp.code.browser.branchvisibilitypolicy import BranchVisibilityPolicyMixin
 from lp.code.browser.summary import BranchCountSummaryView
 from lp.code.enums import (
@@ -849,18 +850,19 @@
                 .scanned())
 
 
-class PersonBranchesMenu(ApplicationMenu):
+class PersonBranchesMenu(ApplicationMenu, HasMergeQueuesMenuMixin):
 
     usedfor = IPerson
     facet = 'branches'
     links = ['registered', 'owned', 'subscribed', 'addbranch',
-             'active_reviews']
+             'active_reviews', 'mergequeues']
     extra_attributes = [
         'active_review_count',
         'owned_branch_count',
         'registered_branch_count',
         'show_summary',
         'subscribed_branch_count',
+        'mergequeue_count',
         ]
 
     def _getCountCollection(self):

=== added file 'lib/lp/code/browser/branchmergequeuelisting.py'
--- lib/lp/code/browser/branchmergequeuelisting.py	1970-01-01 00:00:00 +0000
+++ lib/lp/code/browser/branchmergequeuelisting.py	2010-11-19 10:27:10 +0000
@@ -0,0 +1,105 @@
+# Copyright 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Base class view for merge queue listings."""
+
+__metaclass__ = type
+
+__all__ = [
+    'MergeQueueListingView',
+    'HasMergeQueuesMenuMixin',
+    'PersonMergeQueueListingView',
+    ]
+
+from zope.component import getUtility
+
+from canonical.launchpad.browser.feeds import FeedsMixin
+from canonical.launchpad.webapp import (
+    LaunchpadView,
+    Link,
+    )
+from lp.code.interfaces.branchmergequeuecollection import (
+    IAllBranchMergeQueues,
+    )
+from lp.services.browser_helpers import get_plural_text
+from lp.services.propertycache import cachedproperty
+
+
+class HasMergeQueuesMenuMixin:
+    """A context menus mixin for objects that can own merge queues."""
+
+    def _getCollection(self):
+        return getUtility(IAllBranchMergeQueues).visibleByUser(self.user)
+
+    @property
+    def person(self):
+        """The `IPerson` for the context of the view.
+
+        In simple cases this is the context itself, but in others, like the
+        PersonProduct, it is an attribute of the context.
+        """
+        return self.context
+
+    def mergequeues(self):
+        return Link(
+            '+merge-queues',
+            get_plural_text(
+                self.mergequeue_count,
+                'merge queue', 'merge queues'), site='code')
+
+    @cachedproperty
+    def mergequeue_count(self):
+        return self._getCollection().ownedBy(self.person).count()
+
+
+class MergeQueueListingView(LaunchpadView, FeedsMixin):
+
+    # No feeds initially
+    feed_types = ()
+
+    branch_enabled = True
+    owner_enabled = True
+
+    label_template = 'Merge Queues for %(displayname)s'
+
+    @property
+    def label(self):
+        return self.label_template % {
+            'displayname': self.context.displayname,
+            'title': getattr(self.context, 'title', 'no-title')}
+
+    # Provide a default page_title for distros and other things without
+    # breadcrumbs.
+    page_title = label
+
+    def _getCollection(self):
+        """Override this to say what queues will be in the listing."""
+        raise NotImplementedError(self._getCollection)
+
+    def getVisibleQueuesForUser(self):
+        """Branch merge queues that are visible by the logged in user."""
+        collection = self._getCollection().visibleByUser(self.user)
+        return collection.getMergeQueues()
+
+    @cachedproperty
+    def mergequeues(self):
+        return self.getVisibleQueuesForUser()
+
+    @cachedproperty
+    def mergequeue_count(self):
+        """Return the number of merge queues that will be returned."""
+        return self._getCollection().visibleByUser(self.user).count()
+
+    @property
+    def no_merge_queue_message(self):
+        """Shown when there is no table to show."""
+        return "%s has no merge queues." % self.context.displayname
+
+
+class PersonMergeQueueListingView(MergeQueueListingView):
+
+    label_template = 'Merge Queues owned by %(displayname)s'
+    owner_enabled = False
+
+    def _getCollection(self):
+        return getUtility(IAllBranchMergeQueues).ownedBy(self.context)
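
As a sketch, a listing view for another context would only need to
override _getCollection() (and usually label_template); the team-based
example below is hypothetical:

    class TeamMergeQueueListingView(MergeQueueListingView):
        """Hypothetical listing of queues owned by a team."""

        label_template = 'Merge Queues owned by %(displayname)s'

        def _getCollection(self):
            # self.context is assumed to be the team here.
            return getUtility(IAllBranchMergeQueues).ownedBy(self.context)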

=== modified file 'lib/lp/code/browser/configure.zcml'
--- lib/lp/code/browser/configure.zcml	2010-11-08 17:17:45 +0000
+++ lib/lp/code/browser/configure.zcml	2010-11-19 10:27:10 +0000
@@ -1318,6 +1318,24 @@
             for="lp.code.interfaces.sourcepackagerecipe.ISourcePackageRecipe"
             factory="canonical.launchpad.webapp.breadcrumb.NameBreadcrumb"
             permission="zope.Public"/>
+
+        <browser:page
+            for="lp.registry.interfaces.person.IPerson"
+            layer="lp.code.publisher.CodeLayer"
+            class="lp.code.browser.branchmergequeuelisting.PersonMergeQueueListingView"
+            permission="zope.Public"
+            facet="branches"
+            name="+merge-queues"
+            template="../templates/branchmergequeue-listing.pt"/>
+
+        <browser:page
+            for="*"
+            layer="lp.code.publisher.CodeLayer"
+            name="+bmq-macros"
+            permission="zope.Public"
+            template="../templates/branchmergequeue-macros.pt"/>
+
+
     </facet>
 
     <browser:url

=== added file 'lib/lp/code/browser/tests/test_branchmergequeuelisting.py'
--- lib/lp/code/browser/tests/test_branchmergequeuelisting.py	1970-01-01 00:00:00 +0000
+++ lib/lp/code/browser/tests/test_branchmergequeuelisting.py	2010-11-19 10:27:10 +0000
@@ -0,0 +1,227 @@
+# Copyright 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for branch listing."""
+
+__metaclass__ = type
+
+import re
+
+from mechanize import LinkNotFoundError
+import soupmatchers
+from zope.security.proxy import removeSecurityProxy
+
+from canonical.launchpad.testing.pages import (
+    extract_link_from_tag,
+    extract_text,
+    find_tag_by_id,
+    )
+from canonical.launchpad.webapp import canonical_url
+from canonical.testing.layers import DatabaseFunctionalLayer
+from lp.services.features.model import (
+    FeatureFlag,
+    getFeatureStore,
+    )
+from lp.testing import (
+    BrowserTestCase,
+    login_person,
+    person_logged_in,
+    TestCaseWithFactory,
+    )
+from lp.testing.views import create_initialized_view
+
+
+class MergeQueuesTestMixin:
+
+    def setUp(self):
+        self.branch_owner = self.factory.makePerson(name='eric')
+
+    def enable_queue_flag(self):
+        getFeatureStore().add(FeatureFlag(
+            scope=u'default', flag=u'code.branchmergequeue',
+            value=u'on', priority=1))
+
+    def _makeMergeQueues(self, nr_queues=3, nr_with_private_branches=0):
+        # We create nr_queues merge queues in total, and
+        # nr_with_private_branches of them will each have at least one
+        # private branch in the queue.
+        with person_logged_in(self.branch_owner):
+            mergequeues = [
+                self.factory.makeBranchMergeQueue(
+                    owner=self.branch_owner, branches=self._makeBranches())
+                for i in range(nr_queues-nr_with_private_branches)]
+            mergequeues_with_private_branches = [
+                self.factory.makeBranchMergeQueue(
+                    owner=self.branch_owner,
+                    branches=self._makeBranches(nr_private=1))
+                for i in range(nr_with_private_branches)]
+
+            return mergequeues, mergequeues_with_private_branches
+
+    def _makeBranches(self, nr_public=3, nr_private=0):
+        branches = [
+            self.factory.makeProductBranch(owner=self.branch_owner)
+            for i in range(nr_public)]
+
+        private_branches = [
+            self.factory.makeProductBranch(
+                owner=self.branch_owner, private=True)
+            for i in range(nr_private)]
+
+        branches.extend(private_branches)
+        return branches
+
+
+class TestPersonMergeQueuesView(TestCaseWithFactory, MergeQueuesTestMixin):
+
+    layer = DatabaseFunctionalLayer
+
+    def setUp(self):
+        TestCaseWithFactory.setUp(self)
+        MergeQueuesTestMixin.setUp(self)
+        self.user = self.factory.makePerson()
+
+    def test_mergequeues_with_all_public_branches(self):
+        # Anyone can see mergequeues containing all public branches.
+        mq, mq_with_private = self._makeMergeQueues()
+        login_person(self.user)
+        view = create_initialized_view(
+            self.branch_owner, name="+merge-queues", rootsite='code')
+        self.assertEqual(set(mq), set(view.mergequeues))
+
+    def test_mergequeues_with_a_private_branch_for_owner(self):
+        # Only users with access to private branches can see any queues
+        # containing such branches.
+        mq, mq_with_private = (
+            self._makeMergeQueues(nr_with_private_branches=1))
+        login_person(self.branch_owner)
+        view = create_initialized_view(
+            self.branch_owner, name="+merge-queues", rootsite='code')
+        mq.extend(mq_with_private)
+        self.assertEqual(set(mq), set(view.mergequeues))
+
+    def test_mergequeues_with_a_private_branch_for_other_user(self):
+        # Only users with access to private branches can see any queues
+        # containing such branches.
+        mq, mq_with_private = (
+            self._makeMergeQueues(nr_with_private_branches=1))
+        login_person(self.user)
+        view = create_initialized_view(
+            self.branch_owner, name="+merge-queues", rootsite='code')
+        self.assertEqual(set(mq), set(view.mergequeues))
+
+
+class TestPersonCodePage(BrowserTestCase, MergeQueuesTestMixin):
+    """Tests for the person code homepage.
+
+    This is the default page shown for a person on the code subdomain.
+    """
+
+    layer = DatabaseFunctionalLayer
+
+    def setUp(self):
+        BrowserTestCase.setUp(self)
+        MergeQueuesTestMixin.setUp(self)
+        self._makeMergeQueues()
+
+    def test_merge_queue_menu_link_without_feature_flag(self):
+        login_person(self.branch_owner)
+        browser = self.getUserBrowser(
+            canonical_url(self.branch_owner, rootsite='code'),
+            self.branch_owner)
+        self.assertRaises(
+            LinkNotFoundError,
+            browser.getLink,
+            url='+merge-queues')
+
+    def test_merge_queue_menu_link(self):
+        self.enable_queue_flag()
+        login_person(self.branch_owner)
+        browser = self.getUserBrowser(
+            canonical_url(self.branch_owner, rootsite='code'),
+            self.branch_owner)
+        browser.getLink(url='+merge-queues').click()
+        self.assertEqual(
+            'http://code.launchpad.dev/~eric/+merge-queues',
+            browser.url)
+
+
+class TestPersonMergeQueuesListPage(BrowserTestCase, MergeQueuesTestMixin):
+    """Tests for the person merge queue list page."""
+
+    layer = DatabaseFunctionalLayer
+
+    def setUp(self):
+        BrowserTestCase.setUp(self)
+        MergeQueuesTestMixin.setUp(self)
+        mq, mq_with_private = self._makeMergeQueues()
+        self.merge_queues = mq
+        self.merge_queues.extend(mq_with_private)
+
+    def test_merge_queue_list_contents_without_feature_flag(self):
+        login_person(self.branch_owner)
+        browser = self.getUserBrowser(
+            canonical_url(self.branch_owner, rootsite='code',
+                          view_name='+merge-queues'), self.branch_owner)
+        table = find_tag_by_id(browser.contents, 'mergequeuetable')
+        self.assertIs(None, table)
+        noqueue_matcher = soupmatchers.HTMLContains(
+            soupmatchers.Tag(
+                'No merge queues', 'div',
+                text=re.compile(
+                    r'\s*No merge queues\s*')))
+        self.assertThat(browser.contents, noqueue_matcher)
+
+    def test_merge_queue_list_contents(self):
+        self.enable_queue_flag()
+        login_person(self.branch_owner)
+        browser = self.getUserBrowser(
+            canonical_url(self.branch_owner, rootsite='code',
+                          view_name='+merge-queues'), self.branch_owner)
+
+        table = find_tag_by_id(browser.contents, 'mergequeuetable')
+
+        merge_queue_info = {}
+        for row in table.tbody.fetch('tr'):
+            cells = row('td')
+            row_info = {}
+            queue_name = extract_text(cells[0])
+            if not queue_name.startswith('queue'):
+                continue
+            qlink = extract_link_from_tag(cells[0].find('a'))
+            row_info['queue_link'] = qlink
+            queue_size = extract_text(cells[1])
+            row_info['queue_size'] = queue_size
+            queue_branches = cells[2]('a')
+            branch_links = set()
+            for branch_tag in queue_branches:
+                branch_links.add(extract_link_from_tag(branch_tag))
+            row_info['branch_links'] = branch_links
+            merge_queue_info[queue_name] = row_info
+
+        expected_queue_names = [queue.name for queue in self.merge_queues]
+        self.assertEqual(
+            set(expected_queue_names), set(merge_queue_info.keys()))
+
+        # TODO: when the IBranchMergeQueue API is available, remove
+        # the hard-coded queue size '4'.
+        expected_queue_sizes = dict(
+            [(queue.name, '4') for queue in self.merge_queues])
+        observed_queue_sizes = dict(
+            [(queue.name, merge_queue_info[queue.name]['queue_size'])
+             for queue in self.merge_queues])
+        self.assertEqual(
+            expected_queue_sizes, observed_queue_sizes)
+
+        def branch_links(branches):
+            return [canonical_url(removeSecurityProxy(branch),
+                                  force_local_path=True)
+                    for branch in branches]
+
+        expected_queue_branches = dict(
+            [(queue.name, set(branch_links(queue.branches)))
+             for queue in self.merge_queues])
+        observed_queue_branches = dict(
+            [(queue.name, merge_queue_info[queue.name]['branch_links'])
+             for queue in self.merge_queues])
+        self.assertEqual(
+            expected_queue_branches, observed_queue_branches)

=== modified file 'lib/lp/code/configure.zcml'
--- lib/lp/code/configure.zcml	2010-11-11 11:55:53 +0000
+++ lib/lp/code/configure.zcml	2010-11-19 10:27:10 +0000
@@ -94,6 +94,12 @@
     <allow attributes="browserDefault
                        __call__"/>
   </class>
+  <class class="lp.code.model.branchmergequeuecollection.GenericBranchMergeQueueCollection">
+    <allow interface="lp.code.interfaces.branchmergequeuecollection.IBranchMergeQueueCollection"/>
+  </class>
+  <class class="lp.code.model.branchmergequeuecollection.VisibleBranchMergeQueueCollection">
+    <allow interface="lp.code.interfaces.branchmergequeuecollection.IBranchMergeQueueCollection"/>
+  </class>
   <class class="lp.code.model.branchcollection.GenericBranchCollection">
     <allow interface="lp.code.interfaces.branchcollection.IBranchCollection"/>
   </class>
@@ -148,6 +154,11 @@
       provides="lp.code.interfaces.revisioncache.IRevisionCache">
     <allow interface="lp.code.interfaces.revisioncache.IRevisionCache"/>
   </securedutility>
+  <securedutility
+      class="lp.code.model.branchmergequeuecollection.GenericBranchMergeQueueCollection"
+      provides="lp.code.interfaces.branchmergequeuecollection.IAllBranchMergeQueues">
+    <allow interface="lp.code.interfaces.branchmergequeuecollection.IAllBranchMergeQueues"/>
+  </securedutility>
   <adapter
       for="lp.registry.interfaces.person.IPerson"
       provides="lp.code.interfaces.revisioncache.IRevisionCache"

=== modified file 'lib/lp/code/interfaces/branchmergequeue.py'
--- lib/lp/code/interfaces/branchmergequeue.py	2010-10-20 15:32:38 +0000
+++ lib/lp/code/interfaces/branchmergequeue.py	2010-11-19 10:27:10 +0000
@@ -8,6 +8,7 @@
 __all__ = [
     'IBranchMergeQueue',
     'IBranchMergeQueueSource',
+    'user_has_special_merge_queue_access',
     ]
 
 from lazr.restful.declarations import (
@@ -21,6 +22,7 @@
     CollectionField,
     Reference,
     )
+from zope.component import getUtility
 from zope.interface import Interface
 from zope.schema import (
     Datetime,
@@ -30,6 +32,7 @@
     )
 
 from canonical.launchpad import _
+from canonical.launchpad.interfaces.launchpad import ILaunchpadCelebrities
 from lp.services.fields import (
     PersonChoice,
     PublicPersonChoice,
@@ -113,3 +116,14 @@
         :param registrant: The registrant of the queue.
         :param branches: A list of branches to add to the queue.
         """
+
+
+def user_has_special_merge_queue_access(user):
+    """Admins and bazaar experts have special access.
+
+    :param user: A 'Person' or None.
+    """
+    if user is None:
+        return False
+    celebs = getUtility(ILaunchpadCelebrities)
+    return user.inTeam(celebs.admin) or user.inTeam(celebs.bazaar_experts)

=== added file 'lib/lp/code/interfaces/branchmergequeuecollection.py'
--- lib/lp/code/interfaces/branchmergequeuecollection.py	1970-01-01 00:00:00 +0000
+++ lib/lp/code/interfaces/branchmergequeuecollection.py	2010-11-19 10:27:10 +0000
@@ -0,0 +1,64 @@
+# Copyright 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+# pylint: disable-msg=E0211, E0213
+
+"""A collection of branche merge queues.
+
+See `IBranchMergeQueueCollection` for more details.
+"""
+
+__metaclass__ = type
+__all__ = [
+    'IAllBranchMergeQueues',
+    'IBranchMergeQueueCollection',
+    'InvalidFilter',
+    ]
+
+from zope.interface import Interface
+
+
+class InvalidFilter(Exception):
+    """Raised when an `IBranchMergeQueueCollection` can't apply the filter."""
+
+
+class IBranchMergeQueueCollection(Interface):
+    """A collection of branch merge queues.
+
+    An `IBranchMergeQueueCollection` is an immutable collection of branch
+    merge queues.  It has two kinds of methods: filter methods and
+    query methods.
+
+    Filter methods return new IBranchMergeQueueCollection instances
+    restricted in some way; see `IBranchMergeQueueCollection.ownedBy`
+    and `IBranchMergeQueueCollection.visibleByUser`.
+
+    Query methods get information about the contents of the collection.
+    See `IBranchMergeQueueCollection.count` and
+    `IBranchMergeQueueCollection.getMergeQueues`.
+
+    Implementations of this interface are not 'content classes'.  That
+    is, they do not correspond to a particular row in the database.
+
+    This interface is intended for use within Launchpad, not to be
+    exported as a public API.
+    """
+
+    def count():
+        """The number of merge queues in this collection."""
+
+    def getMergeQueues():
+        """Return a result set of all merge queues in this collection.
+
+        The result set also joins across any extra tables registered
+        with the collection, specifically to allow the caller to sort
+        on values not in the BranchMergeQueue table itself.
+        """
+
+    def ownedBy(person):
+        """Restrict the collection to queues owned by 'person'."""
+
+    def visibleByUser(person):
+        """Restrict the collection to queues that 'person' is allowed to see.
+        """
+
+
+class IAllBranchMergeQueues(IBranchMergeQueueCollection):
+    """An `IBranchMergeQueueCollection` of all branch merge queues."""

=== modified file 'lib/lp/code/model/branchmergequeue.py'
--- lib/lp/code/model/branchmergequeue.py	2010-10-28 03:08:41 +0000
+++ lib/lp/code/model/branchmergequeue.py	2010-11-19 10:27:10 +0000
@@ -7,7 +7,6 @@
 __all__ = ['BranchMergeQueue']
 
 import simplejson
-
 from storm.locals import (
     Int,
     Reference,
@@ -68,7 +67,7 @@
 
     @classmethod
     def new(cls, name, owner, registrant, description=None,
-            configuration=None):
+            configuration=None, branches=None):
         """See `IBranchMergeQueueSource`."""
         store = IMasterStore(BranchMergeQueue)
 
@@ -81,6 +80,9 @@
         queue.registrant = registrant
         queue.description = description
         queue.configuration = configuration
+        if branches is not None:
+            for branch in branches:
+                branch.addToQueue(queue)
 
         store.add(queue)
         return queue
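
For reference, a minimal sketch of creating a queue with initial
branches through the updated factory method; person, branch_a and
branch_b are assumed pre-existing objects:

    from zope.component import getUtility

    from lp.code.interfaces.branchmergequeue import (
        IBranchMergeQueueSource,
        )

    queue = getUtility(IBranchMergeQueueSource).new(
        u'fix-queue', owner=person, registrant=person,
        description=u'Queue for landing fixes.',
        branches=[branch_a, branch_b])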

=== added file 'lib/lp/code/model/branchmergequeuecollection.py'
--- lib/lp/code/model/branchmergequeuecollection.py	1970-01-01 00:00:00 +0000
+++ lib/lp/code/model/branchmergequeuecollection.py	2010-11-19 10:27:10 +0000
@@ -0,0 +1,174 @@
+# Copyright 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Implementations of `IBranchMergeQueueCollection`."""
+
+__metaclass__ = type
+__all__ = [
+    'GenericBranchMergeQueueCollection',
+    'VisibleBranchMergeQueueCollection',
+    ]
+
+from zope.interface import implements
+
+from canonical.launchpad.interfaces.lpstorm import IMasterStore
+from lp.code.interfaces.branchmergequeue import (
+    user_has_special_merge_queue_access,
+    )
+from lp.code.interfaces.branchmergequeuecollection import (
+    IBranchMergeQueueCollection,
+    InvalidFilter,
+    )
+from lp.code.interfaces.codehosting import LAUNCHPAD_SERVICES
+from lp.code.model.branchmergequeue import BranchMergeQueue
+
+
+class GenericBranchMergeQueueCollection:
+    """See `IBranchMergeQueueCollection`."""
+
+    implements(IBranchMergeQueueCollection)
+
+    def __init__(self, store=None, merge_queue_filter_expressions=None,
+                 tables=None, exclude_from_search=None):
+        """Construct a `GenericBranchMergeQueueCollection`.
+
+        :param store: The store to look in for merge queues. If not specified,
+            use the default store.
+        :param merge_queue_filter_expressions: A list of Storm expressions to
+            restrict the queues in the collection. If unspecified, then
+            there will be no restrictions on the result set. That is, all
+            queues in the store will be in the collection.
+        :param tables: A dict mapping Storm tables to Join expressions.
+            If an expression in merge_queue_filter_expressions refers
+            to a table, then that table *must* be in this dict.
+        """
+        self._store = store
+        if merge_queue_filter_expressions is None:
+            merge_queue_filter_expressions = []
+        self._merge_queue_filter_expressions = merge_queue_filter_expressions
+        if tables is None:
+            tables = {}
+        self._tables = tables
+        if exclude_from_search is None:
+            exclude_from_search = []
+        self._exclude_from_search = exclude_from_search
+
+    def count(self):
+        return self._getCount()
+
+    def _getCount(self):
+        """See `IBranchMergeQueueCollection`."""
+        return self._getMergeQueues().count()
+
+    @property
+    def store(self):
+        if self._store is None:
+            return IMasterStore(BranchMergeQueue)
+        else:
+            return self._store
+
+    def _filterBy(self, expressions, table=None, join=None,
+                  exclude_from_search=None):
+        """Return a subset of this collection, filtered by 'expressions'."""
+        tables = self._tables.copy()
+        if table is not None:
+            if join is None:
+                raise InvalidFilter("Cannot specify a table without a join.")
+            tables[table] = join
+        if exclude_from_search is None:
+            exclude_from_search = []
+        if expressions is None:
+            expressions = []
+        return self.__class__(
+            self.store,
+            self._merge_queue_filter_expressions + expressions,
+            tables,
+            self._exclude_from_search + exclude_from_search)
+
+    def _getMergeQueueExpressions(self):
+        """Return the where expressions for this collection."""
+        return self._merge_queue_filter_expressions
+
+    def getMergeQueues(self):
+        return list(self._getMergeQueues())
+
+    def _getMergeQueues(self):
+        """See `IBranchMergeQueueCollection`."""
+        tables = [BranchMergeQueue] + self._tables.values()
+        expressions = self._getMergeQueueExpressions()
+        return self.store.using(*tables).find(BranchMergeQueue, *expressions)
+
+    def ownedBy(self, person):
+        """See `IBranchMergeQueueCollection`."""
+        return self._filterBy([BranchMergeQueue.owner == person])
+
+    def visibleByUser(self, person):
+        """See `IBranchMergeQueueCollection`."""
+        if (person == LAUNCHPAD_SERVICES or
+            user_has_special_merge_queue_access(person)):
+            return self
+        return VisibleBranchMergeQueueCollection(
+            person,
+            self._store, None,
+            self._tables, self._exclude_from_search)
+
+
+class VisibleBranchMergeQueueCollection(GenericBranchMergeQueueCollection):
+    """A mergequeue collection which provides queues visible by a user."""
+
+    def __init__(self, person, store=None,
+                 merge_queue_filter_expressions=None, tables=None,
+                 exclude_from_search=None):
+        super(VisibleBranchMergeQueueCollection, self).__init__(
+            store=store,
+            merge_queue_filter_expressions=merge_queue_filter_expressions,
+            tables=tables,
+            exclude_from_search=exclude_from_search,
+        )
+        self._user = person
+
+    def _filterBy(self, expressions, table=None, join=None,
+                  exclude_from_search=None):
+        """Return a subset of this collection, filtered by 'expressions'."""
+        tables = self._tables.copy()
+        if table is not None:
+            if join is None:
+                raise InvalidFilter("Cannot specify a table without a join.")
+            tables[table] = join
+        if exclude_from_search is None:
+            exclude_from_search = []
+        if expressions is None:
+            expressions = []
+        return self.__class__(
+            self._user,
+            self.store,
+            self._merge_queue_filter_expressions + expressions,
+            tables,
+            self._exclude_from_search + exclude_from_search)
+
+    def visibleByUser(self, person):
+        """See `IBranchMergeQueueCollection`."""
+        if person == self._user:
+            return self
+        raise InvalidFilter(
+            "Cannot filter for merge queues visible by user %r, already "
+            "filtering for %r" % (person, self._user))
+
+    def _getCount(self):
+        """See `IBranchMergeQueueCollection`."""
+        return len(self._getMergeQueues())
+
+    def _getMergeQueues(self):
+        """Return the queues visible by self._user.
+
+        A queue is visible to a user if that user can see all the branches
+        associated with the queue.
+        """
+
+        def allBranchesVisible(user, branches):
+            return len([branch for branch in branches
+                        if branch.visibleByUser(user)]) == branches.count()
+
+        queues = super(
+            VisibleBranchMergeQueueCollection, self)._getMergeQueues()
+        return [queue for queue in queues
+                if allBranchesVisible(self._user, queue.branches)]
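
Putting the collection to use, a rough sketch that mirrors the tests
below; `user` and `person` are assumed to be existing IPerson objects:

    from zope.component import getUtility

    from lp.code.interfaces.branchmergequeuecollection import (
        IAllBranchMergeQueues,
        )

    # Queues owned by `person`, restricted to those `user` may see.
    queues = getUtility(IAllBranchMergeQueues).visibleByUser(
        user).ownedBy(person).getMergeQueues()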

=== modified file 'lib/lp/code/model/recipebuilder.py'
--- lib/lp/code/model/recipebuilder.py	2010-10-27 14:25:19 +0000
+++ lib/lp/code/model/recipebuilder.py	2010-11-19 10:27:10 +0000
@@ -122,6 +122,8 @@
         if chroot is None:
             raise CannotBuild("Unable to find a chroot for %s" %
                               distroarchseries.displayname)
+        logger.info(
+            "Sending chroot file for recipe build to %s" % self._builder.name)
         d = self._builder.slave.cacheFile(logger, chroot)
 
         def got_cache_file(ignored):
@@ -131,7 +133,7 @@
             buildid = "%s-%s" % (self.build.id, build_queue_id)
             cookie = self.buildfarmjob.generateSlaveBuildCookie()
             chroot_sha1 = chroot.content.sha1
-            logger.debug(
+            logger.info(
                 "Initiating build %s on %s" % (buildid, self._builder.url))
 
             return self._builder.slave.build(

=== added file 'lib/lp/code/model/tests/test_branchmergequeuecollection.py'
--- lib/lp/code/model/tests/test_branchmergequeuecollection.py	1970-01-01 00:00:00 +0000
+++ lib/lp/code/model/tests/test_branchmergequeuecollection.py	2010-11-19 10:27:10 +0000
@@ -0,0 +1,201 @@
+# Copyright 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Tests for branch merge queue collections."""
+
+__metaclass__ = type
+
+from zope.component import getUtility
+from zope.security.proxy import removeSecurityProxy
+
+from canonical.launchpad.interfaces.launchpad import ILaunchpadCelebrities
+from canonical.launchpad.interfaces.lpstorm import IMasterStore
+from canonical.testing.layers import DatabaseFunctionalLayer
+from lp.code.interfaces.branchmergequeuecollection import (
+    IAllBranchMergeQueues,
+    IBranchMergeQueueCollection,
+    )
+from lp.code.interfaces.codehosting import LAUNCHPAD_SERVICES
+from lp.code.model.branchmergequeue import BranchMergeQueue
+from lp.code.model.branchmergequeuecollection import (
+    GenericBranchMergeQueueCollection,
+    )
+from lp.testing import TestCaseWithFactory
+
+
+class TestGenericBranchMergeQueueCollection(TestCaseWithFactory):
+
+    layer = DatabaseFunctionalLayer
+
+    def setUp(self):
+        TestCaseWithFactory.setUp(self)
+        self.store = IMasterStore(BranchMergeQueue)
+
+    def test_provides_branchmergequeuecollection(self):
+        # `GenericBranchMergeQueueCollection`
+        # provides the `IBranchMergeQueueCollection` interface.
+        self.assertProvides(
+            GenericBranchMergeQueueCollection(self.store),
+            IBranchMergeQueueCollection)
+
+    def test_getMergeQueues_no_filter_no_queues(self):
+        # If no filter is specified, then the collection is of all
+        # branch merge queues. By default, there are no branch merge
+        # queues.
+        collection = GenericBranchMergeQueueCollection(self.store)
+        self.assertEqual([], list(collection.getMergeQueues()))
+
+    def test_getMergeQueues_no_filter(self):
+        # If no filter is specified, then the collection is of all branch
+        # merge queues.
+        collection = GenericBranchMergeQueueCollection(self.store)
+        queue = self.factory.makeBranchMergeQueue()
+        self.assertEqual([queue], list(collection.getMergeQueues()))
+
+    def test_count(self):
+        # The count() method of a collection returns the number of
+        # elements in the collection.
+        collection = GenericBranchMergeQueueCollection(self.store)
+        self.assertEqual(0, collection.count())
+        for i in range(3):
+            self.factory.makeBranchMergeQueue()
+        self.assertEqual(3, collection.count())
+
+    def test_count_respects_filter(self):
+        # If a collection is a subset of all possible queues, then the count
+        # will be the size of that subset. That is, 'count' respects any
+        # filters that are applied.
+        person = self.factory.makePerson()
+        queue = self.factory.makeBranchMergeQueue(owner=person)
+        queue2 = self.factory.makeBranchMergeQueue()
+        collection = GenericBranchMergeQueueCollection(
+            self.store, [BranchMergeQueue.owner == person])
+        self.assertEqual(1, collection.count())
+
+
+class TestBranchMergeQueueCollectionFilters(TestCaseWithFactory):
+
+    layer = DatabaseFunctionalLayer
+
+    def setUp(self):
+        TestCaseWithFactory.setUp(self)
+        self.all_queues = getUtility(IAllBranchMergeQueues)
+
+    def test_count_respects_visibleByUser_filter(self):
+        # IBranchMergeQueueCollection.count() returns the number of queues
+        # that getMergeQueues() yields, even when the visibleByUser filter is
+        # applied.
+        branch = self.factory.makeAnyBranch(private=True)
+        naked_branch = removeSecurityProxy(branch)
+        queue = self.factory.makeBranchMergeQueue(branches=[naked_branch])
+        branch2 = self.factory.makeAnyBranch(private=True)
+        naked_branch2 = removeSecurityProxy(branch2)
+        queue2 = self.factory.makeBranchMergeQueue(branches=[naked_branch2])
+        collection = self.all_queues.visibleByUser(naked_branch.owner)
+        self.assertEqual(1, len(collection.getMergeQueues()))
+        self.assertEqual(1, collection.count())
+
+    def test_ownedBy(self):
+        # 'ownedBy' returns a new collection restricted to queues owned by
+        # the given person.
+        queue = self.factory.makeBranchMergeQueue()
+        queue2 = self.factory.makeBranchMergeQueue()
+        collection = self.all_queues.ownedBy(queue.owner)
+        self.assertEqual([queue], collection.getMergeQueues())
+
+
+class TestGenericBranchMergeQueueCollectionVisibleFilter(TestCaseWithFactory):
+
+    layer = DatabaseFunctionalLayer
+
+    def setUp(self):
+        TestCaseWithFactory.setUp(self)
+        public_branch = self.factory.makeAnyBranch(name='public')
+        self.queue_with_public_branch = self.factory.makeBranchMergeQueue(
+            branches=[removeSecurityProxy(public_branch)])
+        private_branch1 = self.factory.makeAnyBranch(
+            private=True, name='private1')
+        naked_private_branch1 = removeSecurityProxy(private_branch1)
+        self.private_branch1_owner = naked_private_branch1.owner
+        self.queue1_with_private_branch = self.factory.makeBranchMergeQueue(
+            branches=[naked_private_branch1])
+        private_branch2 = self.factory.makeAnyBranch(
+            private=True, name='private2')
+        self.queue2_with_private_branch = self.factory.makeBranchMergeQueue(
+            branches=[removeSecurityProxy(private_branch2)])
+        self.all_queues = getUtility(IAllBranchMergeQueues)
+
+    def test_all_queues(self):
+        # Without the visibleByUser filter, all queues are in the
+        # collection.
+        self.assertEqual(
+            sorted([self.queue_with_public_branch,
+                    self.queue1_with_private_branch,
+                    self.queue2_with_private_branch]),
+            sorted(self.all_queues.getMergeQueues()))
+
+    def test_anonymous_sees_only_public(self):
+        # Anonymous users can see only queues with public branches.
+        queues = self.all_queues.visibleByUser(None)
+        self.assertEqual([self.queue_with_public_branch],
+                         list(queues.getMergeQueues()))
+
+    def test_random_person_sees_only_public(self):
+        # Logged-in users with no special permissions can see only
+        # queues with public branches.
+        person = self.factory.makePerson()
+        queues = self.all_queues.visibleByUser(person)
+        self.assertEqual([self.queue_with_public_branch],
+                         list(queues.getMergeQueues()))
+
+    def test_owner_sees_own_branches(self):
+        # Users can always see the queues with branches that they own, as well
+        # as queues with public branches.
+        queues = self.all_queues.visibleByUser(self.private_branch1_owner)
+        self.assertEqual(
+            sorted([self.queue_with_public_branch,
+                    self.queue1_with_private_branch]),
+            sorted(queues.getMergeQueues()))
+
+    def test_owner_member_sees_own_queues(self):
+        # Members of teams that own branches in queues can see those
+        # queues, as well as queues with public branches.
+        team_owner = self.factory.makePerson()
+        team = self.factory.makeTeam(team_owner)
+        private_branch = self.factory.makeAnyBranch(
+            owner=team, private=True, name='team')
+        queue_with_private_branch = self.factory.makeBranchMergeQueue(
+            branches=[removeSecurityProxy(private_branch)])
+        queues = self.all_queues.visibleByUser(team_owner)
+        self.assertEqual(
+            sorted([self.queue_with_public_branch,
+                    queue_with_private_branch]),
+            sorted(queues.getMergeQueues()))
+
+    def test_launchpad_services_sees_all(self):
+        # The LAUNCHPAD_SERVICES special user sees *everything*.
+        queues = self.all_queues.visibleByUser(LAUNCHPAD_SERVICES)
+        self.assertEqual(
+            sorted(self.all_queues.getMergeQueues()),
+            sorted(queues.getMergeQueues()))
+
+    def test_admins_see_all(self):
+        # Launchpad administrators see *everything*.
+        admin = self.factory.makePerson()
+        admin_team = removeSecurityProxy(
+            getUtility(ILaunchpadCelebrities).admin)
+        admin_team.addMember(admin, admin_team.teamowner)
+        queues = self.all_queues.visibleByUser(admin)
+        self.assertEqual(
+            sorted(self.all_queues.getMergeQueues()),
+            sorted(queues.getMergeQueues()))
+
+    def test_bazaar_experts_see_all(self):
+        # Members of the bazaar_experts team see *everything*.
+        bzr_experts = removeSecurityProxy(
+            getUtility(ILaunchpadCelebrities).bazaar_experts)
+        expert = self.factory.makePerson()
+        bzr_experts.addMember(expert, bzr_experts.teamowner)
+        queues = self.all_queues.visibleByUser(expert)
+        self.assertEqual(
+            sorted(self.all_queues.getMergeQueues()),
+            sorted(queues.getMergeQueues()))

=== added file 'lib/lp/code/templates/branchmergequeue-listing.pt'
--- lib/lp/code/templates/branchmergequeue-listing.pt	1970-01-01 00:00:00 +0000
+++ lib/lp/code/templates/branchmergequeue-listing.pt	2010-11-19 10:27:10 +0000
@@ -0,0 +1,68 @@
+<html
+  xmlns="http://www.w3.org/1999/xhtml";
+  xmlns:tal="http://xml.zope.org/namespaces/tal";
+  xmlns:metal="http://xml.zope.org/namespaces/metal";
+  xmlns:i18n="http://xml.zope.org/namespaces/i18n";
+  metal:use-macro="view/macro:page/main_only"
+  i18n:domain="launchpad">
+
+<body>
+
+  <div metal:fill-slot="main">
+
+    <div tal:condition="not: features/code.branchmergequeue">
+        <em>
+         No merge queues
+        </em>
+    </div>
+
+    <div tal:condition="features/code.branchmergequeue">
+
+        <tal:has-queues condition="view/mergequeue_count">
+
+            <table id="mergequeuetable" class="listing sortable">
+              <thead>
+                <tr>
+                  <th colspan="2">Name</th>
+                  <th tal:condition="view/owner_enabled">Owner</th>
+                  <th>Queue Size</th>
+                  <th>Associated Branches</th>
+                </tr>
+              </thead>
+              <tbody>
+                <tal:mergequeues repeat="mergeQueue view/mergequeues">
+                  <tr>
+                    <td colspan="2">
+                      <a tal:attributes="href mergeQueue/fmt:url"
+                         tal:content="mergeQueue/name">Merge queue name</a>
+                    </td>
+                    <td tal:condition="view/owner_enabled">
+                      <a tal:replace="structure mergeQueue/owner/fmt:link">
+                        Owner
+                      </a>
+                    </td>
+                    <td>4</td>
+                    <td>
+                        <metal:display-branches
+                            use-macro="context/@@+bmq-macros/merge_queue_branches"/>
+                    </td>
+                  </tr>
+                </tal:mergequeues>
+              </tbody>
+            </table>
+
+        </tal:has-queues>
+
+        <em id="no-queues"
+         tal:condition="not: view/mergequeue_count"
+         tal:content="view/no_merge_queue_message">
+         No merge queues
+        </em>
+
+    </div>
+
+  </div>
+
+</body>
+</html>
+

=== added file 'lib/lp/code/templates/branchmergequeue-macros.pt'
--- lib/lp/code/templates/branchmergequeue-macros.pt	1970-01-01 00:00:00 +0000
+++ lib/lp/code/templates/branchmergequeue-macros.pt	2010-11-19 10:27:10 +0000
@@ -0,0 +1,20 @@
+ <tal:root
+   xmlns:tal="http://xml.zope.org/namespaces/tal"
+   xmlns:metal="http://xml.zope.org/namespaces/metal"
+   omit-tag="">
+
+<metal:merge_queue_branches define-macro="merge_queue_branches">
+    <table class="listing">
+          <tbody>
+            <tal:mergequeue-branches repeat="branch mergeQueue/branches">
+              <tr>
+                <td>
+                  <a tal:attributes="href branch/fmt:url"
+                     tal:content="branch/name">Branch name</a>
+                </td>
+              </tr>
+            </tal:mergequeue-branches>
+          </tbody>
+    </table>
+</metal:merge_queue_branches>
+</tal:root>
\ No newline at end of file

=== modified file 'lib/lp/code/templates/person-codesummary.pt'
--- lib/lp/code/templates/person-codesummary.pt	2010-11-08 09:03:59 +0000
+++ lib/lp/code/templates/person-codesummary.pt	2010-11-19 10:27:10 +0000
@@ -4,7 +4,8 @@
   xmlns:i18n="http://xml.zope.org/namespaces/i18n"
   id="portlet-person-codesummary"
   class="portlet"
-  tal:define="menu context/menu:branches"
+  tal:define="menu context/menu:branches;
+      features request/features"
   tal:condition="menu/show_summary">
 
   <table>
@@ -26,5 +27,11 @@
       <td class="code-count" tal:content="menu/active_review_count">5</td>
       <td tal:content="structure menu/active_reviews/render" />
     </tr>
+    <tr tal:condition="features/code.branchmergequeue" id="mergequeue-counts">
+      <td class="code-count" tal:content="menu/mergequeue_count">5</td>
+      <td tal:condition="menu"
+          tal:content="structure menu/mergequeues/render"
+          />
+    </tr>
   </table>
 </div>

=== modified file 'lib/lp/registry/javascript/tests/test_milestone_table.html'
--- lib/lp/registry/javascript/tests/test_milestone_table.html	2010-04-28 18:43:25 +0000
+++ lib/lp/registry/javascript/tests/test_milestone_table.html	2010-11-19 10:27:10 +0000
@@ -9,7 +9,7 @@
   <link rel="stylesheet" href="../../../../canonical/launchpad/icing/yui/cssreset/reset.css"/>
   <link rel="stylesheet" href="../../../../canonical/launchpad/icing/yui/cssfonts/fonts.css"/>
   <link rel="stylesheet" href="../../../../canonical/launchpad/icing/yui/cssbase/base.css"/>
-  <link rel="stylesheet" href="../../../canonical/launchpad/javascript/test.css" />
+  <link rel="stylesheet" href="../../../../canonical/launchpad/javascript/test.css" />
 
   <!-- The module under test -->
   <script type="text/javascript" src="../milestonetable.js"></script>

=== modified file 'lib/lp/services/apachelogparser/base.py'
--- lib/lp/services/apachelogparser/base.py	2010-09-11 19:25:13 +0000
+++ lib/lp/services/apachelogparser/base.py	2010-11-19 10:27:10 +0000
@@ -204,15 +204,21 @@
 
 def get_method_and_path(request):
     """Extract the method of the request and path of the requested file."""
-    L = request.split()
-    # HTTP 1.0 requests might omit the HTTP version so we must cope with them.
-    if len(L) == 2:
-        method, path = L
+    method, ignore, rest = request.partition(' ')
+    # In what follows, the common case is that `first` is the path and
+    # `last` is the protocol.
+    first, ignore, last = rest.rpartition(' ')
+    if first == '':
+        # HTTP 1.0 requests might omit the HTTP version so we cope with them.
+        path = last
+    elif not last.startswith('HTTP'):
+        # We also cope with requests that omit the HTTP version *and*
+        # have a space in the path (see bug 676489 for an example).
+        path = rest
     else:
-        method, path, protocol = L
-
+        # This is the common case.
+        path = first
     if path.startswith('http://') or path.startswith('https://'):
         uri = URI(path)
         path = uri.path
-
     return method, path
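
The request shapes the rewritten parser copes with, shown as a sketch
that mirrors the tests below:

    from lp.services.apachelogparser.base import get_method_and_path

    # The common case: method, path and protocol.
    get_method_and_path('GET /foo.deb HTTP/1.1')   # ('GET', '/foo.deb')
    # HTTP 1.0 request with the version omitted.
    get_method_and_path('GET /foo.deb')            # ('GET', '/foo.deb')
    # A space in the path; the trailing protocol is still recognised.
    get_method_and_path('GET /a b HTTP/1.0')       # ('GET', '/a b')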

=== modified file 'lib/lp/services/apachelogparser/tests/test_apachelogparser.py'
--- lib/lp/services/apachelogparser/tests/test_apachelogparser.py	2010-10-04 19:50:45 +0000
+++ lib/lp/services/apachelogparser/tests/test_apachelogparser.py	2010-11-19 10:27:10 +0000
@@ -29,6 +29,7 @@
     get_fd_and_file_size,
     get_files_to_parse,
     get_host_date_status_and_request,
+    get_method_and_path,
     parse_file,
     )
 from lp.services.apachelogparser.model.parsedapachelog import ParsedApacheLog
@@ -71,6 +72,35 @@
         date = '[13/Jun/2008:18:38:57 +0100]'
         self.assertEqual(get_day(date), datetime(2008, 6, 13))
 
+    def test_parsing_path_with_missing_protocol(self):
+        request = (r'GET /56222647/deluge-gtk_1.3.0-0ubuntu1_all.deb?'
+                   r'N\x1f\x9b')
+        method, path = get_method_and_path(request)
+        self.assertEqual(method, 'GET')
+        self.assertEqual(
+            path,
+            r'/56222647/deluge-gtk_1.3.0-0ubuntu1_all.deb?N\x1f\x9b')
+
+    def test_parsing_path_with_space(self):
+        # See bug 676489.
+        request = (r'GET /56222647/deluge-gtk_1.3.0-0ubuntu1_all.deb?'
+                   r'N\x1f\x9b Z%7B... HTTP/1.0')
+        method, path = get_method_and_path(request)
+        self.assertEqual(method, 'GET')
+        self.assertEqual(
+            path,
+            r'/56222647/deluge-gtk_1.3.0-0ubuntu1_all.deb?N\x1f\x9b Z%7B...')
+
+    def test_parsing_path_with_space_and_missing_protocol(self):
+        # This is a variation of bug 676489.
+        request = (r'GET /56222647/deluge-gtk_1.3.0-0ubuntu1_all.deb?'
+                   r'N\x1f\x9b Z%7B...')
+        method, path = get_method_and_path(request)
+        self.assertEqual(method, 'GET')
+        self.assertEqual(
+            path,
+            r'/56222647/deluge-gtk_1.3.0-0ubuntu1_all.deb?N\x1f\x9b Z%7B...')
+
 
 class Test_get_fd_and_file_size(TestCase):
 

=== modified file 'lib/lp/services/mailman/doc/postings.txt'
--- lib/lp/services/mailman/doc/postings.txt	2010-10-25 12:11:43 +0000
+++ lib/lp/services/mailman/doc/postings.txt	2010-11-19 10:27:10 +0000
@@ -177,25 +177,6 @@
     From: itest-one-...@xxxxxxxxxxxxxxxxxxx
     To: anne.person@xxxxxxxxxxx
     ...
-    Sender: itest-one-bounces+anne.person=example.com@xxxxxxxxxxxxxxxxxxx
-    Errors-To: itest-one-bounces+anne.person=example.com@xxxxxxxxxxxxxxxxxxx
-    ...
-    X-MailFrom: itest-one-bounces+anne.person=example.com@xxxxxxxxxxxxxxxxxxx
-    X-RcptTo: anne.person@xxxxxxxxxxx
-    <BLANKLINE>
-    Your request to the Itest-one mailing list
-    <BLANKLINE>
-        Posting of your message titled "An unsubscribed post"
-    <BLANKLINE>
-    has been rejected by the list moderator.  The moderator gave the
-    following reason for rejecting your request:
-    <BLANKLINE>
-    "[No reason given]"
-    <BLANKLINE>
-    Any questions or comments should be directed to the list administrator
-    at:
-    <BLANKLINE>
-        itest-one-owner@xxxxxxxxxxxxxxxxxxx
 
 Anne posts another message to the mailing list, but she is still not
 subscribed to it.  The team administrator deems this message to be spam and

=== modified file 'lib/lp/testing/factory.py'
--- lib/lp/testing/factory.py	2010-11-09 09:46:20 +0000
+++ lib/lp/testing/factory.py	2010-11-19 10:27:10 +0000
@@ -1119,7 +1119,8 @@
         return namespace.createBranch(branch_type, name, creator)
 
     def makeBranchMergeQueue(self, registrant=None, owner=None, name=None,
-                             description=None, configuration=None):
+                             description=None, configuration=None,
+                             branches=None):
         """Create a BranchMergeQueue."""
         if name is None:
             name = unicode(self.getUniqueString('queue'))
@@ -1134,7 +1135,7 @@
                 self.getUniqueString('key'): self.getUniqueString('value')}))
 
         queue = getUtility(IBranchMergeQueueSource).new(
-            name, owner, registrant, description, configuration)
+            name, owner, registrant, description, configuration, branches)
         return queue
 
     def enableDefaultStackingForProduct(self, product, branch=None):

=== modified file 'lib/lp/translations/scripts/tests/test_message_sharing_migration.py'
--- lib/lp/translations/scripts/tests/test_message_sharing_migration.py	2010-10-18 16:36:46 +0000
+++ lib/lp/translations/scripts/tests/test_message_sharing_migration.py	2010-11-19 10:27:10 +0000
@@ -18,6 +18,7 @@
     record_statements,
     TestCaseWithFactory,
     )
+from lp.testing.sampledata import ADMIN_EMAIL
 from lp.translations.interfaces.pofiletranslator import IPOFileTranslatorSet
 from lp.translations.model.pomsgid import POMsgID
 from lp.translations.model.potemplate import POTemplate
@@ -62,8 +63,8 @@
         # This test needs the privileges of rosettaadmin (to delete
         # POTMsgSets) but it also needs to set up test conditions which
         # requires other privileges.
+        super(TestPOTMsgSetMerging, self).setUp(user=ADMIN_EMAIL)
         self.layer.switchDbUser('postgres')
-        super(TestPOTMsgSetMerging, self).setUp(user='mark@xxxxxxxxxxx')
         super(TestPOTMsgSetMerging, self).setUpProduct()
 
     def test_matchedPOTMsgSetsShare(self):
@@ -252,9 +253,9 @@
         The matching POTMsgSets will be merged by the _mergePOTMsgSets
         call.
         """
-        self.layer.switchDbUser('postgres')
         super(TestPOTMsgSetMergingAndTranslations, self).setUp(
-            user='mark@xxxxxxxxxxx')
+            user=ADMIN_EMAIL)
+        self.layer.switchDbUser('postgres')
         super(TestPOTMsgSetMergingAndTranslations, self).setUpProduct()
 
     def test_sharingDivergedMessages(self):
@@ -374,9 +375,8 @@
     layer = LaunchpadZopelessLayer
 
     def setUp(self):
+        super(TestTranslationMessageNonMerging, self).setUp(user=ADMIN_EMAIL)
         self.layer.switchDbUser('postgres')
-        super(TestTranslationMessageNonMerging, self).setUp(
-            user='mark@xxxxxxxxxxx')
         super(TestTranslationMessageNonMerging, self).setUpProduct()
 
     def test_MessagesAreNotSharedAcrossPOTMsgSets(self):
@@ -402,9 +402,9 @@
     layer = LaunchpadZopelessLayer
 
     def setUp(self):
+        super(TestTranslationMessageMerging, self).setUp(user=ADMIN_EMAIL)
+        transaction.commit()
         self.layer.switchDbUser('postgres')
-        super(TestTranslationMessageMerging, self).setUp(
-            user='mark@xxxxxxxxxxx')
         super(TestTranslationMessageMerging, self).setUpProduct()
 
     def test_messagesCanStayDiverged(self):
@@ -565,8 +565,8 @@
     layer = LaunchpadZopelessLayer
 
     def setUp(self):
+        super(TestRemoveDuplicates, self).setUp(user=ADMIN_EMAIL)
         self.layer.switchDbUser('postgres')
-        super(TestRemoveDuplicates, self).setUp(user='mark@xxxxxxxxxxx')
         super(TestRemoveDuplicates, self).setUpProduct()
 
     def test_duplicatesAreCleanedUp(self):
@@ -738,8 +738,8 @@
     layer = LaunchpadZopelessLayer
 
     def setUp(self):
-        self.layer.switchDbUser('postgres')
         super(TestSharingMigrationPerformance, self).setUp()
+        self.layer.switchDbUser('postgres')
         super(TestSharingMigrationPerformance, self).setUpProduct()
 
     def _flushDbObjects(self):

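
The setUp ordering these fixtures now share, as a sketch; the class
name is hypothetical, and the commit matters when objects created
during setUp must be visible to the restricted DB user:

    import transaction

    from canonical.testing.layers import LaunchpadZopelessLayer
    from lp.testing import TestCaseWithFactory
    from lp.testing.sampledata import ADMIN_EMAIL

    class ExampleMergingTest(TestCaseWithFactory):
        """Hypothetical fixture following the same pattern."""

        layer = LaunchpadZopelessLayer

        def setUp(self):
            # Become the admin user first, commit so the new DB user
            # can see setUp's work, then switch users.
            super(ExampleMergingTest, self).setUp(user=ADMIN_EMAIL)
            transaction.commit()
            self.layer.switchDbUser('postgres')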
=== modified file 'lib/lp/translations/windmill/tests/test_languages.py'
--- lib/lp/translations/windmill/tests/test_languages.py	2010-10-18 12:56:47 +0000
+++ lib/lp/translations/windmill/tests/test_languages.py	2010-11-19 10:27:10 +0000
@@ -7,6 +7,7 @@
 __all__ = []
 
 from canonical.launchpad.windmill.testing.constants import (
+    FOR_ELEMENT,
     PAGE_LOAD,
     SLEEP,
     )
@@ -61,7 +62,8 @@
         # "Not-matching" message is hidden and languages are visible.
         self.client.asserts.assertProperty(
             id=u'no_filter_matches',
-            validator='className|unseen')
+            validator='className|unseen',
+            timeout=FOR_ELEMENT)
         self._assert_languages_visible({
             u'German': True,
             u'Mende': True,

