← Back to team overview

launchpad-reviewers team mailing list archive

[Merge] lp:~stub/launchpad/postgresql-9.1 into lp:launchpad

 

Stuart Bishop has proposed merging lp:~stub/launchpad/postgresql-9.1 into lp:launchpad with lp:~stub/launchpad/postgresql-9.1-db as a prerequisite.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~stub/launchpad/postgresql-9.1/+merge/91841

= Summary =

We want to migrate Launchpad to using PostgreSQL 9.1

== Proposed fix ==

Get the test suite passing with both PostgreSQL 8.4 and PostgreSQL 9.1.

== Implementation details ==

The DB patch in lp:~stub/launchpad/postgresql-9.1-db has already been applied to production and will be merged from lp:launchpad/db-devel to lp:launchpad/devel shortly.
-- 
https://code.launchpad.net/~stub/launchpad/postgresql-9.1/+merge/91841
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~stub/launchpad/postgresql-9.1 into lp:launchpad.
=== modified file 'database/schema/Makefile'
--- database/schema/Makefile	2011-12-13 15:12:32 +0000
+++ database/schema/Makefile	2012-02-07 14:51:23 +0000
@@ -55,7 +55,7 @@
     pg_dump --schema=public --disable-triggers -a --column-inserts -O ${1} \
     | grep -v "\( TOC \|INSERT INTO launchpaddatabase\|sessiondata\|sessionpkgdata\|SELECT pg_catalog\.setval\|^--\| fticache \|'fticache'\|ALTER TABLE secret\|INSERT INTO secret\)" \
     | $(PYTHON) sort_sql.py >> $(2) && \
-    $(PYTHON) fti.py --force -d ${1} -q
+    $(PYTHON) fti.py --live-rebuild -d ${1} -q
 
 
 # The latest schema dump from production. Database patches are relative
@@ -77,7 +77,7 @@
 	@ echo "* Loading sample data"
 	@ psql -v ON_ERROR_STOP=1 -d ${TEMPLATE_WITH_TEST_SAMPLEDATA} -f $(SAMPLEDATA) > /dev/null
 	@ echo "* Rebuilding full text indexes"
-	@ ${PYTHON} fti.py --force -q -d ${TEMPLATE_WITH_TEST_SAMPLEDATA}
+	@ ${PYTHON} fti.py --live-rebuild -q -d ${TEMPLATE_WITH_TEST_SAMPLEDATA}
 	@ echo "* Resetting sequences"
 	@ ${PYTHON} reset_sequences.py -d ${TEMPLATE_WITH_TEST_SAMPLEDATA}
 	@ echo "* Disabling autovacuum"
@@ -94,7 +94,7 @@
 	@ echo "* Loading sample data"
 	@ psql -v ON_ERROR_STOP=1 -d ${TEMPLATE_WITH_DEV_SAMPLEDATA} -f $(SAMPLEDATA_DEV) > /dev/null
 	@ echo "* Rebuilding full text indexes"
-	@ ${PYTHON} fti.py --force -q -d ${TEMPLATE_WITH_DEV_SAMPLEDATA}
+	@ ${PYTHON} fti.py --live-rebuild -q -d ${TEMPLATE_WITH_DEV_SAMPLEDATA}
 	@ echo "* Resetting sequences"
 	@ ${PYTHON} reset_sequences.py -d ${TEMPLATE_WITH_DEV_SAMPLEDATA}
 	@ echo "* Disabling autovacuum"
@@ -110,6 +110,9 @@
 # database schema, full text indexes and grants into it.
 # It will also create session DBs for the test and dev environments.
 # No sample data is added at this point.
+# NB. PG 9.1 has plpgsql installed in the template databases.
+# We can remove the stanzas that install plpgsql when we no longer support
+# PG 8.4.
 create:
 	@ echo "* If this fails you need to run as the postgresql superuser"
 	@ echo "* eg. sudo -u postgres make create"
@@ -127,15 +130,21 @@
 	@ echo "* Vacuuming"
 	@ vacuumdb -fz ${EMPTY_DBNAME}
 
-	@ echo "* Creating session databases '${SESSION_DBNAME}' (if necessary)"
+	@ echo "* Creating session database '${SESSION_DBNAME}' (if necessary)"
 	@if [ "$$((`psql -l | grep -w ${SESSION_DBNAME} | wc -l`))" = '0' ]; \
 	    then ${CREATEDB} template0 ${SESSION_DBNAME} ; \
-	    createlang plpgsql ${SESSION_DBNAME}; \
+	    if [ "$$((`createlang -l ${SESSION_DBNAME} \
+		| grep plpgsql | wc -l`))" = '0' ]; \
+		then createlang plpgsql ${SESSION_DBNAME}; \
+	    fi; \
 	    psql -q -d ${SESSION_DBNAME} -f launchpad_session.sql ; \
 	fi
 	@ echo "* Creating session database '${TEST_SESSION_DBNAME}'"
 	@ ${CREATEDB} template0 ${TEST_SESSION_DBNAME}
-	@ createlang plpgsql ${TEST_SESSION_DBNAME}
+	@if [ "$$((`createlang -l ${TEST_SESSION_DBNAME} \
+	    | grep plpgsql | wc -l`))" = '0' ]; \
+	    then createlang plpgsql ${TEST_SESSION_DBNAME}; \
+	fi
 	@ psql -q -d ${TEST_SESSION_DBNAME} -f launchpad_session.sql
 
 # Confirm that launchpad-XX-00-0.sql hasn't been messed with - this file

=== modified file 'database/schema/comments.sql'
--- database/schema/comments.sql	2012-01-25 06:02:40 +0000
+++ database/schema/comments.sql	2012-02-07 14:51:23 +0000
@@ -8,7 +8,7 @@
 
 -- AccessPolicy
 
-COMMENT ON TABLE AccessPolicy IS 'An access policy used to manage a project or distribution\'s artifacts.';
+COMMENT ON TABLE AccessPolicy IS 'An access policy used to manage a project or distribution''s artifacts.';
 COMMENT ON COLUMN AccessPolicy.product IS 'The product that this policy is used on.';
 COMMENT ON COLUMN AccessPolicy.distribution IS 'The distribution that this policy is used on.';
 COMMENT ON COLUMN AccessPolicy.type IS 'The type of policy (an enum value). Private, Security, etc.';
@@ -557,7 +557,7 @@
 COMMENT ON COLUMN DistributionSourcePackage.bug_count IS 'Number of bugs matching the package distribution and sourcepackagename. NULL means it has not yet been calculated.';
 COMMENT ON COLUMN DistributionSourcePackage.po_message_count IS 'Number of translations matching the package distribution and sourcepackagename. NULL means it has not yet been calculated.';
 COMMENT ON COLUMN DistributionSourcePackage.is_upstream_link_allowed IS 'Whether an upstream link may be added if it does not already exist.';
-COMMENT ON COLUMN DistributionSourcePackage.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they\'ve reported a new bug.';
+COMMENT ON COLUMN DistributionSourcePackage.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they''ve reported a new bug.';
 COMMENT ON COLUMN DistributionSourcePackage.enable_bugfiling_duplicate_search IS 'Enable/disable a search for posiible duplicates when a bug is filed.';
 
 -- DistributionSourcePackageCache
@@ -708,7 +708,7 @@
 
 
 -- Product
-COMMENT ON TABLE Product IS 'Product: a DOAP Product. This table stores core information about an open source product. In Launchpad, anything that can be shipped as a tarball would be a product, and in some cases there might be products for things that never actually ship, depending on the project. For example, most projects will have a \'website\' product, because that allows you to file a Malone bug against the project website. Note that these are not actual product releases, which are stored in the ProductRelease table.';
+COMMENT ON TABLE Product IS 'Product: a DOAP Product. This table stores core information about an open source product. In Launchpad, anything that can be shipped as a tarball would be a product, and in some cases there might be products for things that never actually ship, depending on the project. For example, most projects will have a ''website'' product, because that allows you to file a Malone bug against the project website. Note that these are not actual product releases, which are stored in the ProductRelease table.';
 COMMENT ON COLUMN Product.owner IS 'The Product owner would typically be the person who created this product in Launchpad. But we will encourage the upstream maintainer of a product to become the owner in Launchpad. The Product owner can edit any aspect of the Product, as well as appointing people to specific roles with regard to the Product. Also, the owner can add a new ProductRelease and also edit Rosetta POTemplates associated with this product.';
 COMMENT ON COLUMN Product.registrant IS 'The Product registrant is the Person who created the product in Launchpad.  It is set at creation and is never changed thereafter.';
 COMMENT ON COLUMN Product.summary IS 'A brief summary of the product. This will be displayed in bold at the top of the product page, above the description.';
@@ -748,7 +748,7 @@
 COMMENT ON COLUMN Product.remote_product IS 'The ID of this product on its remote bug tracker.';
 COMMENT ON COLUMN Product.max_bug_heat IS 'The highest heat value across bugs for this product.';
 COMMENT ON COLUMN Product.date_next_suggest_packaging IS 'The date when Launchpad can resume suggesting Ubuntu packages that the project provides.';
-COMMENT ON COLUMN Product.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they\'ve reported a new bug.';
+COMMENT ON COLUMN Product.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they''ve reported a new bug.';
 COMMENT ON COLUMN Product.enable_bugfiling_duplicate_search IS 'Enable/disable a search for posiible duplicates when a bug is filed.';
 
 -- ProductLicense
@@ -836,7 +836,7 @@
 COMMENT ON COLUMN Project.bug_reporting_guidelines IS 'Guidelines to the end user for reporting bugs on products in this project.';
 COMMENT ON COLUMN Project.reviewer_whiteboard IS 'A whiteboard for Launchpad admins, registry experts and the project owners to capture the state of current issues with the project.';
 COMMENT ON COLUMN Project.max_bug_heat IS 'The highest heat value across bugs for products in this project.';
-COMMENT ON COLUMN Project.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they\'ve reported a new bug.';
+COMMENT ON COLUMN Project.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they''ve reported a new bug.';
 
 -- POTMsgSet
 COMMENT ON TABLE POTMsgSet IS 'This table is stores a collection of msgids
@@ -1075,7 +1075,7 @@
 COMMENT ON COLUMN Distribution.mirror_admin IS 'Person or team with privileges to mark a mirror as official.';
 COMMENT ON COLUMN Distribution.driver IS 'The team or person responsible for approving goals for each release in the distribution. This should usually be a very small team because the Distribution driver can approve items for backporting to past releases as well as the current release under development. Each distroseries has its own driver too, so you can have the small superset in the Distribution driver, and then specific teams per distroseries for backporting, for example, or for the current release management team on the current development focus release.';
 COMMENT ON COLUMN Distribution.translationgroup IS 'The translation group that is responsible for all translation work in this distribution.';
-COMMENT ON COLUMN Distribution.translationpermission IS 'The level of openness of this distribution\'s translation process. The enum lists different approaches to translation, from the very open (anybody can edit any translation in any language) to the completely closed (only designated translators can make any changes at all).';
+COMMENT ON COLUMN Distribution.translationpermission IS 'The level of openness of this distribution''s translation process. The enum lists different approaches to translation, from the very open (anybody can edit any translation in any language) to the completely closed (only designated translators can make any changes at all).';
 COMMENT ON COLUMN Distribution.bug_supervisor IS 'Person who is responsible for managing bugs on this distribution.';
 COMMENT ON COLUMN Distribution.security_contact IS 'The person or team who handles security-related issues in the distribution.';
 COMMENT ON COLUMN Distribution.official_rosetta IS 'Whether or not this distribution uses Rosetta for its official translation team and coordination.';
@@ -1088,7 +1088,7 @@
 COMMENT ON COLUMN Distribution.bug_reporting_guidelines IS 'Guidelines to the end user for reporting bugs on this distribution.';
 COMMENT ON COLUMN Distribution.reviewer_whiteboard IS 'A whiteboard for Launchpad admins, registry experts and the project owners to capture the state of current issues with the project.';
 COMMENT ON COLUMN Distribution.max_bug_heat IS 'The highest heat value across bugs for this distribution.';
-COMMENT ON COLUMN Distribution.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they\'ve reported a new bug.';
+COMMENT ON COLUMN Distribution.bug_reported_acknowledgement IS 'A message of acknowledgement to display to a bug reporter after they''ve reported a new bug.';
 COMMENT ON COLUMN Distribution.registrant IS 'The person in launchpad who registered this distribution.';
 COMMENT ON COLUMN Distribution.package_derivatives_email IS 'The optional email address template to use when sending emails about package updates in a distributrion. The string {package_name} in the template will be replaced with the actual package name being updated.';
 
@@ -1278,7 +1278,7 @@
 COMMENT ON COLUMN Person.subscriptionpolicy IS 'The policy for new members to join this team.';
 COMMENT ON COLUMN Person.renewal_policy IS 'The policy for membership renewal on this team.';
 COMMENT ON COLUMN Person.personal_standing IS 'The standing of the person, which indicates (for now, just) whether the person can post to a mailing list without requiring first post moderation.  Values are documented in dbschema.PersonalStanding.';
-COMMENT ON COLUMN Person.personal_standing_reason IS 'The reason a person\'s standing has changed.';
+COMMENT ON COLUMN Person.personal_standing_reason IS 'The reason a person''s standing has changed.';
 COMMENT ON COLUMN Person.mail_resumption_date IS 'A NULL resumption date or a date in the past indicates that there is no vacation in effect.  Vacations are granular to the day, so a datetime is not necessary.';
 COMMENT ON COLUMN Person.mailing_list_auto_subscribe_policy IS 'The auto-subscription policy for the person, i.e. whether and how the user is automatically subscribed to mailing lists for teams they join.  Values are described in dbschema.MailingListAutoSubscribePolicy.';
 COMMENT ON COLUMN Person.mailing_list_receive_duplicates IS 'True means the user wants to receive list copies of messages on which they are explicitly named as a recipient.';
@@ -1303,7 +1303,7 @@
 COMMENT ON COLUMN PersonLocation.last_modified_by IS 'The person who last updated this record. We allow people to provide location and time zone information for other users, when those users have not specified their own location. This allows people to garden the location information for their teams, for example, like a wiki.';
 COMMENT ON COLUMN PersonLocation.date_last_modified IS 'The date this record was last modified.';
 COMMENT ON COLUMN PersonLocation.locked IS 'Whether or not this record can be modified by someone other than the person himself?';
-COMMENT ON COLUMN PersonLocation.visible IS 'Should this person\'s location and time zone be visible to others?';
+COMMENT ON COLUMN PersonLocation.visible IS 'Should this person''s location and time zone be visible to others?';
 
 
 -- PersonNotification

=== modified file 'database/schema/fti.py'
--- database/schema/fti.py	2012-01-27 03:04:31 +0000
+++ database/schema/fti.py	2012-02-07 14:51:23 +0000
@@ -1,6 +1,6 @@
 #!/usr/bin/python -S
 #
-# Copyright 2009-2011 Canonical Ltd.  This software is licensed under the
+# Copyright 2009-2012 Canonical Ltd.  This software is licensed under the
 # GNU Affero General Public License version 3 (see the file LICENSE).
 #
 # This modules uses relative imports.
@@ -16,16 +16,11 @@
 from distutils.version import LooseVersion
 from optparse import OptionParser
 import os.path
-import subprocess
 import sys
-from tempfile import NamedTemporaryFile
 from textwrap import dedent
-import time
 
 import psycopg2.extensions
 
-from lp.services.config import config
-from lp.services.database.postgresql import ConnectionString
 from lp.services.database.sqlbase import (
     connect,
     ISOLATION_LEVEL_AUTOCOMMIT,
@@ -40,9 +35,6 @@
     )
 import replication.helpers
 
-# Defines parser and locale to use.
-DEFAULT_CONFIG = 'default'
-
 PGSQL_BASE = '/usr/share/postgresql'
 
 # tsearch2 ranking constants:
@@ -174,77 +166,6 @@
         execute(con, sql)
 
 
-def fti(con, table, columns, configuration=DEFAULT_CONFIG):
-    """Setup full text indexing for a table"""
-
-    index = "%s_fti" % table
-    qindex = quote_identifier(index)
-    qtable = quote_identifier(table)
-    # Quote the columns
-    qcolumns = [
-        (quote_identifier(column), weight) for column, weight in columns
-        ]
-
-    # Drop the trigger if it exists
-    trigger_exists = bool(execute(con, """
-        SELECT COUNT(*) FROM pg_trigger, pg_class, pg_namespace
-        WHERE pg_trigger.tgname = 'tsvectorupdate'
-            AND pg_trigger.tgrelid = pg_class.oid
-            AND pg_class.relname = %(table)s
-            AND pg_class.relnamespace = pg_namespace.oid
-            AND pg_namespace.nspname = 'public'
-        """, results=True, args=vars())[0][0])
-    if trigger_exists:
-        log.debug('tsvectorupdate trigger exists in %s. Dropping.' % qtable)
-        sexecute(con, "DROP TRIGGER tsvectorupdate ON %s" % qtable)
-
-    # Drop the fti index if it exists
-    index_exists = bool(execute(con, """
-        SELECT COUNT(*) FROM pg_index, pg_class, pg_namespace
-        WHERE pg_index.indexrelid = pg_class.oid
-            AND pg_class.relnamespace = pg_namespace.oid
-            AND pg_class.relname = %(index)s
-            AND pg_namespace.nspname = 'public'
-        """, results=True, args=vars())[0][0])
-    if index_exists:
-        log.debug('%s exists. Dropping.' % qindex)
-        sexecute(con, "DROP INDEX %s" % qindex)
-
-    # Create the 'fti' column if it doesn't already exist
-    column_exists = bool(execute(con, """
-        SELECT COUNT(*) FROM pg_attribute, pg_class, pg_namespace
-        WHERE pg_attribute.attname='fti'
-            AND pg_attribute.attisdropped IS FALSE
-            AND pg_attribute.attrelid = pg_class.oid
-            AND pg_class.relname = %(table)s
-            AND pg_class.relnamespace = pg_namespace.oid
-            AND pg_namespace.nspname = 'public'
-        """, results=True, args=vars())[0][0])
-    if not column_exists:
-        log.debug('fti column does not exist in %s. Creating.' % qtable)
-        sexecute(con, "ALTER TABLE %s ADD COLUMN fti tsvector" % qtable)
-
-    # Create the trigger
-    columns_and_weights = []
-    for column, weight in qcolumns:
-        columns_and_weights.extend((column, weight))
-
-    sql = """
-        CREATE TRIGGER tsvectorupdate BEFORE UPDATE OR INSERT ON %s
-        FOR EACH ROW EXECUTE PROCEDURE ftiupdate(%s)
-        """ % (table, ','.join(columns_and_weights))
-    sexecute(con, sql)
-
-    # Rebuild the fti column, as the information it contains may be out
-    # of date with recent configuration updates.
-    sexecute(con, r"""UPDATE %s SET fti=NULL""" % qtable)
-
-    # Create the fti index
-    sexecute(con, "CREATE INDEX %s ON %s USING gist(fti)" % (
-        qindex, qtable
-        ))
-
-
 def nullify(con):
     """Set all fti index columns to NULL"""
     for table, ignored in ALL_FTI:
@@ -286,290 +207,6 @@
                 con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
 
 
-def setup(con, configuration=DEFAULT_CONFIG):
-    """Setup and install tsearch2 if isn't already"""
-
-    # tsearch2 is out-of-the-box in 8.3+
-    required = LooseVersion('8.3.0')
-    assert get_pgversion(con) >= required, (
-        'This script only supports PostgreSQL 8.3+')
-
-    schema_exists = bool(execute(
-        con, "SELECT COUNT(*) FROM pg_namespace WHERE nspname='ts2'",
-        results=True)[0][0])
-    if not schema_exists:
-        execute(con, 'CREATE SCHEMA ts2')
-        con.commit()
-    execute(con, 'SET search_path = ts2, public;')
-
-    tsearch2_sql_path = get_tsearch2_sql_path(con)
-
-    ts2_installed = bool(execute(con, """
-        SELECT COUNT(*) FROM pg_type,pg_namespace
-        WHERE pg_type.typnamespace=pg_namespace.oid
-            AND pg_namespace.nspname  = 'ts2'
-        """, results=True)[0][0])
-    if not ts2_installed:
-        assert slonik_sql is None, """
-            tsearch2 needs to be setup on each node first with
-            fti.py --setup-only
-            """
-
-        log.debug('Installing tsearch2')
-        cmd = 'psql -f - %s' % ConnectionString(
-            config.database.rw_main_master).asPGCommandLineArgs()
-        p = subprocess.Popen(
-            cmd.split(' '), stdin=subprocess.PIPE,
-            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-        tsearch2_sql = open(tsearch2_sql_path).read()
-        out, err = p.communicate(
-            "SET client_min_messages=ERROR; CREATE SCHEMA ts2;" +
-            tsearch2_sql.replace('public;', 'ts2, public;'))
-        if p.returncode != 0:
-            log.fatal("Error executing %s:", cmd)
-            log.debug(out)
-            sys.exit(p.returncode)
-
-    # Create ftq helper and its sibling _ftq.
-    # ftq(text) returns a tsquery, suitable for use querying the full text
-    # indexes. _ftq(text) returns the string that would be parsed by
-    # to_tsquery and is used to debug the query we generate.
-    shared_func = r'''
-        import re
-
-        # I think this method would be more robust if we used a real
-        # tokenizer and parser to generate the query string, but we need
-        # something suitable for use as a stored procedure which currently
-        # means no external dependancies.
-
-        # Convert to Unicode
-        query = args[0].decode('utf8')
-        ## plpy.debug('1 query is %s' % repr(query))
-
-        # Normalize whitespace
-        query = re.sub("(?u)\s+"," ", query)
-
-        # Convert AND, OR, NOT and - to tsearch2 punctuation
-        query = re.sub(r"(?u)(?:^|\s)-([\w\(])", r" !\1", query)
-        query = re.sub(r"(?u)\bAND\b", "&", query)
-        query = re.sub(r"(?u)\bOR\b", "|", query)
-        query = re.sub(r"(?u)\bNOT\b", " !", query)
-        ## plpy.debug('2 query is %s' % repr(query))
-
-        # Deal with unwanted punctuation. We convert strings of punctuation
-        # inside words to a '-' character for the hypenation handling below
-        # to deal with further. Outside of words we replace with whitespace.
-        # We don't mess with -&|!()' as they are handled later.
-        #punctuation = re.escape(r'`~@#$%^*+=[]{}:;"<>,.?\/')
-        punctuation = r"[^\w\s\-\&\|\!\(\)']"
-        query = re.sub(r"(?u)(\w)%s+(\w)" % (punctuation,), r"\1-\2", query)
-        query = re.sub(r"(?u)%s+" % (punctuation,), " ", query)
-        ## plpy.debug('3 query is %s' % repr(query))
-
-        # Strip ! characters inside and at the end of a word
-        query = re.sub(r"(?u)(?<=\w)[\!]+", " ", query)
-
-        # Now that we have handled case-sensitive booleans, convert to
-        # lowercase.
-        query = query.lower()
-
-        # Convert foo-bar to ((foo&bar)|foobar) and foo-bar-baz to
-        # ((foo&bar&baz)|foobarbaz)
-        def hyphen_repl(match):
-            bits = match.group(0).split("-")
-            return "((%s)|%s)" % ("&".join(bits), "".join(bits))
-        query = re.sub(r"(?u)\b\w+-[\w\-]+\b", hyphen_repl, query)
-        ## plpy.debug('4 query is %s' % repr(query))
-
-        # Any remaining - characters are spurious
-        query = query.replace('-','')
-
-        # Remove unpartnered bracket on the left and right
-        query = re.sub(r"(?ux) ^ ( [^(]* ) \)", r"(\1)", query)
-        query = re.sub(r"(?ux) \( ( [^)]* ) $", r"(\1)", query)
-
-        # Remove spurious brackets
-        query = re.sub(r"(?u)\(([^\&\|]*?)\)", r" \1 ", query)
-        ## plpy.debug('5 query is %s' % repr(query))
-
-        # Insert & between tokens without an existing boolean operator
-        # ( not proceeded by (|&!
-        query = re.sub(r"(?u)(?<![\(\|\&\!])\s*\(", "&(", query)
-        ## plpy.debug('6 query is %s' % repr(query))
-        # ) not followed by )|&
-        query = re.sub(r"(?u)\)(?!\s*(\)|\||\&|\s*$))", ")&", query)
-        ## plpy.debug('6.1 query is %s' % repr(query))
-        # Whitespace not proceded by (|&! not followed by &|
-        query = re.sub(r"(?u)(?<![\(\|\&\!\s])\s+(?![\&\|\s])", "&", query)
-        ## plpy.debug('7 query is %s' % repr(query))
-
-        # Detect and repair syntax errors - we are lenient because
-        # this input is generally from users.
-
-        # Fix unbalanced brackets
-        openings = query.count("(")
-        closings = query.count(")")
-        if openings > closings:
-            query = query + " ) "*(openings-closings)
-        elif closings > openings:
-            query = " ( "*(closings-openings) + query
-        ## plpy.debug('8 query is %s' % repr(query))
-
-        # Strip ' character that do not have letters on both sides
-        query = re.sub(r"(?u)((?<!\w)'|'(?!\w))", "", query)
-
-        # Brackets containing nothing but whitespace and booleans, recursive
-        last = ""
-        while last != query:
-            last = query
-            query = re.sub(r"(?u)\([\s\&\|\!]*\)", "", query)
-        ## plpy.debug('9 query is %s' % repr(query))
-
-        # An & or | following a (
-        query = re.sub(r"(?u)(?<=\()[\&\|\s]+", "", query)
-        ## plpy.debug('10 query is %s' % repr(query))
-
-        # An &, | or ! immediatly before a )
-        query = re.sub(r"(?u)[\&\|\!\s]*[\&\|\!]+\s*(?=\))", "", query)
-        ## plpy.debug('11 query is %s' % repr(query))
-
-        # An &,| or ! followed by another boolean.
-        query = re.sub(r"(?ux) \s* ( [\&\|\!] ) [\s\&\|]+", r"\1", query)
-        ## plpy.debug('12 query is %s' % repr(query))
-
-        # Leading & or |
-        query = re.sub(r"(?u)^[\s\&\|]+", "", query)
-        ## plpy.debug('13 query is %s' % repr(query))
-
-        # Trailing &, | or !
-        query = re.sub(r"(?u)[\&\|\!\s]+$", "", query)
-        ## plpy.debug('14 query is %s' % repr(query))
-
-        # If we have nothing but whitespace and tsearch2 operators,
-        # return NULL.
-        if re.search(r"(?u)^[\&\|\!\s\(\)]*$", query) is not None:
-            return None
-
-        # Convert back to UTF-8
-        query = query.encode('utf8')
-        ## plpy.debug('15 query is %s' % repr(query))
-        '''
-    text_func = shared_func + """
-        return query or None
-        """
-    tsquery_func = shared_func + """
-        p = plpy.prepare("SELECT to_tsquery('%s', $1) AS x", ["text"])
-        query = plpy.execute(p, [query], 1)[0]["x"]
-        return query or None
-        """ % configuration
-    sexecute(con, r"""
-        CREATE OR REPLACE FUNCTION ts2._ftq(text) RETURNS text AS %s
-        LANGUAGE plpythonu IMMUTABLE
-        RETURNS NULL ON NULL INPUT
-        """ % quote(text_func))
-    #print psycopg2.extensions.QuotedString(text_func)
-    sexecute(con, r"""
-        CREATE OR REPLACE FUNCTION ts2.ftq(text) RETURNS tsquery AS %s
-        LANGUAGE plpythonu IMMUTABLE
-        RETURNS NULL ON NULL INPUT
-        """ % quote(tsquery_func))
-
-    sexecute(con,
-            r"COMMENT ON FUNCTION ftq(text) IS '"
-            r"Convert a string to a tsearch2 query using the preferred "
-            r"configuration. eg. "
-            r""""SELECT * FROM Bug WHERE fti @@ ftq(''fatal crash'')". """
-            r"The query is lowercased, and multiple words searched using "
-            r"AND.'"
-            )
-    sexecute(con,
-            r"COMMENT ON FUNCTION ftq(text) IS '"
-            r"Convert a string to an unparsed tsearch2 query'"
-            )
-
-    # Create our trigger function. The default one that ships with tsearch2
-    # doesn't support weighting so we need our own. We remove safety belts
-    # since we know we will be calling it correctly.
-    sexecute(con, r"""
-        CREATE OR REPLACE FUNCTION ts2.ftiupdate() RETURNS trigger AS '
-            new = TD["new"]
-            args = TD["args"][:]
-
-            # Short circuit if none of the relevant columns have been
-            # modified and fti is not being set to NULL (setting the fti
-            # column to NULL is thus how we can force a rebuild of the fti
-            # column).
-            if TD["event"] == "UPDATE" and new["fti"] != None:
-                old = TD["old"]
-                relevant_modification = False
-                for column_name in args[::2]:
-                    if new[column_name] != old[column_name]:
-                        relevant_modification = True
-                        break
-                if not relevant_modification:
-                    return "OK"
-
-            # Generate an SQL statement that turns the requested
-            # column values into a weighted tsvector
-            sql = []
-            for i in range(0, len(args), 2):
-                sql.append(
-                        "ts2.setweight(ts2.to_tsvector(''default'', coalesce("
-                        "substring(ltrim($%d) from 1 for 2500),'''')),"
-                        "CAST($%d AS \\"char\\"))" % (i + 1, i + 2))
-                args[i] = new[args[i]]
-
-            sql = "SELECT %s AS fti" % "||".join(sql)
-
-            # Execute and store in the fti column
-            plan = plpy.prepare(sql, ["text", "char"] * (len(args)/2))
-            new["fti"] = plpy.execute(plan, args, 1)[0]["fti"]
-
-            # Tell PostgreSQL we have modified the data
-            return "MODIFY"
-        ' LANGUAGE plpythonu
-        """)
-
-    sexecute(con,
-        r"COMMENT ON FUNCTION ftiupdate() IS 'Trigger function that keeps "
-        r"the fti tsvector column up to date.'"
-        )
-
-    # Confirm database locale is valid, and set the 'default' tsearch2
-    # configuration to use it.
-    r = execute(con, r"""
-            SELECT setting FROM pg_settings
-            WHERE context='internal' AND name='lc_ctype'
-            """, results=True)
-    assert len(r) == 1, 'Unable to determine database locale'
-    locale = r[0][0]
-    assert locale.startswith('en_') or locale in ('C', 'en'), (
-            "Non-english database locales are not supported with launchpad. "
-            "Fresh initdb required."
-            )
-    r = locale.split('.', 1)
-    if len(r) > 1:
-        assert r[1].upper() in ("UTF8", "UTF-8"), \
-                "Only UTF8 encodings supported. Fresh initdb required."
-    else:
-        assert len(r) == 1, 'Invalid database locale %s' % repr(locale)
-
-    r = execute(con,
-            "SELECT COUNT(*) FROM pg_ts_config WHERE cfgname='default'",
-            results=True)
-    if r[0][0] == 0:
-        sexecute(con, """
-            CREATE TEXT SEARCH CONFIGURATION ts2.default (
-                COPY = pg_catalog.english)""")
-
-    # Don't bother with this - the setting is not exported with dumps
-    # or propogated  when duplicating the database. Only reliable
-    # way we can use is setting search_path in postgresql.conf
-    #
-    # Set the default schema search path so this stuff can be found
-    #execute(con, 'ALTER DATABASE %s SET search_path = public,ts2;' % dbname)
-
-
 def needs_refresh(con, table, columns):
     '''Return true if the index needs to be rebuilt.
 
@@ -632,16 +269,6 @@
 def main():
     parser = OptionParser()
     parser.add_option(
-            "-s", "--setup-only", dest="setup",
-            action="store_true", default=False,
-            help="Only install tsearch2 - don't build the indexes.",
-            )
-    parser.add_option(
-            "-f", "--force", dest="force",
-            action="store_true", default=False,
-            help="Force a rebuild of all full text indexes.",
-            )
-    parser.add_option(
             "-0", "--null", dest="null",
             action="store_true", default=False,
             help="Set all full text index column values to NULL.",
@@ -657,7 +284,7 @@
     global options, args
     (options, args) = parser.parse_args()
 
-    if options.setup + options.force + options.null + options.liverebuild > 1:
+    if options.null + options.liverebuild > 1:
         parser.error("Incompatible options")
 
     global log
@@ -670,28 +297,14 @@
     if options.liverebuild and is_replicated_db:
         parser.error("--live-rebuild does not work with Slony-I install.")
 
-    if is_replicated_db:
-        global slonik_sql
-        slonik_sql = NamedTemporaryFile(prefix="fti_sl", suffix=".sql")
-        print >> slonik_sql, "-- Generated by %s %s" % (
-                sys.argv[0], time.ctime())
-
     if options.liverebuild:
         con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
         liverebuild(con)
-    else:
+    elif options.null:
         con.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
-        setup(con)
-        if options.null:
-            nullify(con)
-        elif not options.setup:
-            for table, columns in ALL_FTI:
-                if needs_refresh(con, table, columns):
-                    log.info("Rebuilding full text index on %s", table)
-                    fti(con, table, columns)
-                else:
-                    log.info(
-                        "No need to rebuild full text index on %s", table)
+        nullify(con)
+    else:
+        parser.error("Required argument not specified")
 
     if is_replicated_db:
         slonik_sql.flush()

=== modified file 'lib/devscripts/ec2test/instance.py'
--- lib/devscripts/ec2test/instance.py	2012-01-01 03:03:28 +0000
+++ lib/devscripts/ec2test/instance.py	2012-02-07 14:51:23 +0000
@@ -145,6 +145,10 @@
 127.0.0.99      bazaar.launchpad.dev
 ' >> /etc/hosts
 
+# If this is uncommented, postgresql 9.1 will be explicitly installed
+# before other versions and end up on port 5432, overriding the version
+# of postgresql specified in launchpad-database-dependencies.
+## apt-get -y install postgresql-9.1 postgresql-9.1-debversion postgresql-client-9.1 postgresql-contrib-9.1 postgresql-plpython-9.1 postgresql-server-dev-9.1 postgresql-doc-9.1
 apt-get -y install launchpad-developer-dependencies apache2 apache2-mpm-worker
 
 # Create the ec2test user, give them passwordless sudo.

=== modified file 'lib/devscripts/ec2test/testrunner.py'
--- lib/devscripts/ec2test/testrunner.py	2011-12-21 07:06:05 +0000
+++ lib/devscripts/ec2test/testrunner.py	2012-02-07 14:51:23 +0000
@@ -330,12 +330,12 @@
             as_user(
                 "sudo mkdir -p %s && sudo mount -t tmpfs none %s" % (d, d))
         as_user(
-            "sudo service postgresql-8.4 stop"
+            "sudo service postgresql stop"
             "; sudo mv /var/lib/postgresql /tmp/postgresql-tmp"
             "&& sudo mkdir /var/lib/postgresql"
             "&& sudo mount -t tmpfs none /var/lib/postgresql"
             "&& sudo mv /tmp/postgresql-tmp/* /var/lib/postgresql"
-            "&& sudo service postgresql-8.4 start")
+            "&& sudo service postgresql start")
         as_user("sudo add-apt-repository ppa:bzr")
         as_user("sudo add-apt-repository ppa:launchpad")
         as_user("sudo aptitude update")

=== modified file 'lib/lp/archivepublisher/model/ftparchive.py'
--- lib/lp/archivepublisher/model/ftparchive.py	2012-01-01 02:58:52 +0000
+++ lib/lp/archivepublisher/model/ftparchive.py	2012-02-07 14:51:23 +0000
@@ -573,7 +573,7 @@
             SourcePackageName.name,
             LibraryFileAlias.filename,
             Component.name,
-            Concatenate("binary-", DistroArchSeries.architecturetag),
+            Concatenate(u"binary-", DistroArchSeries.architecturetag),
             )
         join_conditions = [
             BinaryPackageRelease.id ==

=== modified file 'lib/lp/bugs/doc/official-bug-tags.txt'
--- lib/lp/bugs/doc/official-bug-tags.txt	2011-12-24 17:49:30 +0000
+++ lib/lp/bugs/doc/official-bug-tags.txt	2012-02-07 14:51:23 +0000
@@ -43,8 +43,7 @@
     <lp.bugs.model.bugtarget.OfficialBugTag object at...
     >>> store.flush()
     Traceback (most recent call last):
-    IntegrityError: duplicate key value violates unique constraint
-    "officialbugtag__distribution__tag__key"
+    IntegrityError: ...
 
     >>> import transaction
     >>> transaction.abort()

=== modified file 'lib/lp/bugs/model/bugtask.py'
--- lib/lp/bugs/model/bugtask.py	2012-02-02 14:52:48 +0000
+++ lib/lp/bugs/model/bugtask.py	2012-02-07 14:51:23 +0000
@@ -3424,7 +3424,7 @@
                          # tag that comes first in alphabetic order.
                          BugTag.id == SQL("""
                              SELECT id FROM BugTag AS bt
-                             WHERE bt.bug=bug.id ORDER BY bt.name LIMIT 1
+                             WHERE bt.bug=bug.id ORDER BY bt.tag LIMIT 1
                              """))),
                     ]
                 ),

=== modified file 'lib/lp/hardwaredb/doc/hwdb-device-tables.txt'
--- lib/lp/hardwaredb/doc/hwdb-device-tables.txt	2012-01-20 15:42:44 +0000
+++ lib/lp/hardwaredb/doc/hwdb-device-tables.txt	2012-02-07 14:51:23 +0000
@@ -29,8 +29,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwvendorname__lc_vendor_name__idx"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -42,8 +41,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwvendorname__lc_vendor_name__idx"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -52,8 +50,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwvendorname__lc_vendor_name__idx"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -72,8 +69,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwvendorname__lc_vendor_name__idx"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -90,8 +86,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwvendorname__lc_vendor_name__idx"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -108,8 +103,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwvendorname__lc_vendor_name__idx"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -174,8 +168,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwvendorid__bus_vendor_id__vendor_name__key"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -832,8 +825,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwdevice__bus_vendor_id__bus_product_id__key"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -845,8 +837,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwdevice__bus_vendor_id__bus_product_id__variant__key"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -1176,8 +1167,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwdevicenamevariant__vendor_name__product_name__device__key"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -1255,8 +1245,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwdriver__package_name__name__key"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -1266,8 +1255,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwdriver__package_name__name__key"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -1605,8 +1593,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwdevicedriverlink__device__driver__key"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 
@@ -1615,8 +1602,7 @@
     >>> store.flush()
     Traceback (most recent call last):
     ...
-    IntegrityError: duplicate key... violates unique
-    constraint "hwdevicedriverlink__device__key"
+    IntegrityError: ...
     <BLANKLINE>
     >>> LaunchpadZopelessLayer.txn.abort()
 

=== modified file 'lib/lp/registry/model/oopsreferences.py'
--- lib/lp/registry/model/oopsreferences.py	2012-01-25 04:01:19 +0000
+++ lib/lp/registry/model/oopsreferences.py	2012-02-07 14:51:23 +0000
@@ -39,7 +39,7 @@
     # Note that the POSIX regexp syntax is subtly different to the Python,
     # and that we need to escape all \ characters to keep the SQL interpreter
     # happy.
-    posix_oops_match = (r"~* '^(oops-\\w+)|(\\moops-\\w+)'")
+    posix_oops_match = (r"~* E'^(oops-\\w+)|(\\moops-\\w+)'")
     params = dict(start_date=start_date, end_date=end_date)
     params.update(context_params)
     sql_params = sqlvalues(**params)

=== modified file 'lib/lp/registry/vocabularies.py'
--- lib/lp/registry/vocabularies.py	2012-01-21 03:40:40 +0000
+++ lib/lp/registry/vocabularies.py	2012-02-07 14:51:23 +0000
@@ -2166,12 +2166,12 @@
                 SELECT DISTINCT ON (spn.id)
                     spn.id, spn.name, dspc.binpkgnames,
                     CASE WHEN spn.name = ? THEN 100
-                        WHEN dspc.binpkgnames SIMILAR TO
-                            '(^| )' || ? || '( |$)' THEN 75
-                        WHEN spn.name SIMILAR TO
-                            '(^|.*-)' || ? || '(-|$)' THEN 50
-                        WHEN dspc.binpkgnames SIMILAR TO
-                            '(^|.*-)' || ? || '(-| |$)' THEN 25
+                        WHEN dspc.binpkgnames
+                            ~ ('(^| )' || ? || '( |$)') THEN 75
+                        WHEN spn.name
+                            ~ ('(^|.*-)' || ? || '(-|$)') THEN 50
+                        WHEN dspc.binpkgnames
+                            ~ ('(^|.*-)' || ? || '(-| |$)') THEN 25
                         ELSE 1
                         END AS rank
                 FROM SourcePackageName spn

=== modified file 'lib/lp/services/database/doc/db-policy.txt'
--- lib/lp/services/database/doc/db-policy.txt	2011-12-24 17:49:30 +0000
+++ lib/lp/services/database/doc/db-policy.txt	2012-02-07 14:51:23 +0000
@@ -41,7 +41,7 @@
     >>> transaction.commit()
     Traceback (most recent call last):
     ...
-    InternalError: transaction is read-only
+    InternalError: ...
 
     >>> transaction.abort()
 

=== modified file 'lib/lp/services/database/doc/storm.txt'
--- lib/lp/services/database/doc/storm.txt	2012-01-15 11:08:00 +0000
+++ lib/lp/services/database/doc/storm.txt	2012-02-07 14:51:23 +0000
@@ -75,7 +75,7 @@
     >>> transaction.commit()
     Traceback (most recent call last):
     ...
-    InternalError: transaction is read-only
+    InternalError: ...
 
     >>> transaction.abort()
     >>> t = transaction.begin()

=== modified file 'lib/lp/services/webapp/doc/test_adapter_permissions.txt'
--- lib/lp/services/webapp/doc/test_adapter_permissions.txt	2011-12-24 17:49:30 +0000
+++ lib/lp/services/webapp/doc/test_adapter_permissions.txt	2012-02-07 14:51:23 +0000
@@ -23,7 +23,7 @@
     >>> transaction.commit()
     Traceback (most recent call last):
     ...
-    InternalError: transaction is read-only
+    InternalError: ...
 
 Test this once more to ensure the settings stick across transactions.
 
@@ -33,7 +33,7 @@
     >>> transaction.commit()
     Traceback (most recent call last):
     ...
-    InternalError: transaction is read-only
+    InternalError: ...
 
 If a MASTER_FLAVOR is requested, it should allow writes to table in that
 Store's replication set.

=== modified file 'lib/lp/soyuz/model/sourcepackagerelease.py'
--- lib/lp/soyuz/model/sourcepackagerelease.py	2012-01-03 05:05:39 +0000
+++ lib/lp/soyuz/model/sourcepackagerelease.py	2012-02-07 14:51:23 +0000
@@ -204,6 +204,8 @@
         """See `ISourcePackageRelease`."""
         store = Store.of(self)
         store.flush()
+        if content is not None:
+            content = unicode(content)
         store.execute(
             "UPDATE sourcepackagerelease SET copyright=%s WHERE id=%s",
             (content, self.id))

=== modified file 'lib/lp/translations/browser/potemplate.py'
--- lib/lp/translations/browser/potemplate.py	2012-01-05 20:11:40 +0000
+++ lib/lp/translations/browser/potemplate.py	2012-02-07 14:51:23 +0000
@@ -970,7 +970,8 @@
                 SourcePackageName.id == POTemplate.sourcepackagenameID))
 
         return join.select(POTemplate, Packaging, ProductSeries, Product,
-            OtherTemplate, SourcePackageName)
+            OtherTemplate, SourcePackageName).order_by(
+                SourcePackageName.name, POTemplate.priority, POTemplate.name)
 
     def rowCSSClass(self, template):
         if template.iscurrent:

=== modified file 'lib/lp/translations/stories/standalone/xx-series-templates.txt'
--- lib/lp/translations/stories/standalone/xx-series-templates.txt	2011-10-10 13:47:51 +0000
+++ lib/lp/translations/stories/standalone/xx-series-templates.txt	2012-02-07 14:51:23 +0000
@@ -72,7 +72,6 @@
     0         evolution       ... 2007-01-05  Edit Upload Download Administer
     ...
     0         pmount          ... 2005-05-06  Edit Upload Download Administer
-    ...
 
 Translation group owners can administer all templates for their distribution,
 including those that are currently disabled.

=== modified file 'utilities/launchpad-database-setup'
--- utilities/launchpad-database-setup	2012-01-25 13:54:41 +0000
+++ utilities/launchpad-database-setup	2012-02-07 14:51:23 +0000
@@ -18,7 +18,7 @@
 # https://dev.launchpad.net/DatabaseSetup which are intended for
 # initial Launchpad setup on an otherwise unconfigured postgresql instance
 
-for pgversion in 8.4 8.3 8.2
+for pgversion in 9.1 8.4
 do
   sudo grep -q "^auto" /etc/postgresql/$pgversion/main/start.conf
   if [ $? -eq 0 ]; then
@@ -58,23 +58,18 @@
 
 sudo cp -a /etc/postgresql/$pgversion/main/pg_hba.conf \
     /etc/postgresql/$pgversion/main/pg_hba.conf.old
-sudo grep -q Launchpad /etc/postgresql/$pgversion/main/pg_hba.conf || \
-sudo patch /etc/postgresql/$pgversion/main/pg_hba.conf <<'EOF'
---- pg_hba.conf 2005-11-02 17:33:08.000000000 -0800
-+++ /tmp/pg_hba.conf    2005-11-03 07:32:46.932400423 -0800
-@@ -58,7 +58,10 @@
- # on a non-local interface via the listen_addresses configuration parameter,
- # or via the -i or -h command line switches.
- #
--
-+# Launchpad users
-+local   all         all                           trust
-+host    all         all         127.0.0.1/32      trust
-+host    all         all         ::1/128           trust
-
-
+sudo dd of=/etc/postgresql/$pgversion/main/pg_hba.conf <<'EOF'
+# Launchpad users
+# This section added automatically by launchpad-database-setup
+local   all         all                           trust
+host    all         all         127.0.0.1/32      trust
+host    all         all         ::1/128           trust
+# END Launchpad users
 
 EOF
+sudo dd oflag=append conv=notrunc \
+    if=/etc/postgresql/$pgversion/main/pg_hba.conf.old \
+    of=/etc/postgresql/$pgversion/main/pg_hba.conf
 sudo chown --reference=/etc/postgresql/$pgversion/main/pg_hba.conf.old \
     /etc/postgresql/$pgversion/main/pg_hba.conf
 sudo chmod --reference=/etc/postgresql/$pgversion/main/pg_hba.conf.old \
@@ -88,7 +83,10 @@
 ##
 # Enable launchpad full text searching in database
 search_path='$user,public,ts2'
-add_missing_from=false
+# Per Bug #90809, standard_conforming_strings should be 'on'
+standard_conforming_strings=off
+escape_string_warning=off
+
 #enable_seqscan=false
 log_statement='none'
 log_line_prefix='[%t] %q%u@%d '
@@ -96,15 +94,6 @@
 
 EOF
 
-if [ "$pgversion" = 8.2 -o "$pgversion" = 8.3 ]
-then
-  sudo grep -q '^[[:space:]]*max_fsm_relations' /etc/postgresql/$pgversion/main/postgresql.conf || \
-  sudo tee -a /etc/postgresql/$pgversion/main/postgresql.conf <<'EOF'
-max_fsm_relations=2000
-
-EOF
-fi
-
 if [ -e /etc/init.d/postgresql-$pgversion ]; then
     sudo /etc/init.d/postgresql-$pgversion start
 else