launchpad-reviewers team mailing list archive: Message #24955
[Merge] ~cjwatson/launchpad:services-database-future-imports into launchpad:master
Colin Watson has proposed merging ~cjwatson/launchpad:services-database-future-imports into launchpad:master.
Commit message:
Port lp.services.database doctests to usual __future__ imports
Requested reviews:
Launchpad code reviewers (launchpad-reviewers)
For more details, see:
https://code.launchpad.net/~cjwatson/launchpad/+git/launchpad/+merge/386774
--
Your team Launchpad code reviewers is requested to review the proposed merge of ~cjwatson/launchpad:services-database-future-imports into launchpad:master.
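For reviewers who have not followed the earlier branches in this series: the runner-side change is simply to pass future=True to the lp.testing.systemdocs setUp helper (see the test_*.py hunks at the end of the diff), which, assuming it behaves as in the other suites already ported, compiles the doctests with the usual __future__ flags, so print must be used as a function. A minimal sketch of the effect, not part of the branch itself:

    from __future__ import print_function

    # With print_function active (what the future=True setUp arranges for
    # these doctests), the old statement form `print dist` is a SyntaxError;
    # only the function form works, on Python 2 as well as Python 3.
    for name in ["debian", "gentoo"]:
        print("Dist name is: %s" % name)

The doctest edits below are the corresponding mechanical conversion, plus a few six.ensure_str()/six.text_type() calls to keep the expected output identical on both Python versions.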
diff --git a/lib/lp/services/database/doc/decoratedresultset.txt b/lib/lp/services/database/doc/decoratedresultset.txt
index f4b9981..0066d84 100644
--- a/lib/lp/services/database/doc/decoratedresultset.txt
+++ b/lib/lp/services/database/doc/decoratedresultset.txt
@@ -49,7 +49,7 @@ definition), so all the normal methods can be used. Iterating over the
decorated result set produces the decorated results:
>>> for dist in decorated_result_set:
- ... print dist
+ ... print(dist)
Dist name is: debian
Dist name is: gentoo
...
@@ -58,7 +58,7 @@ decorated result set produces the decorated results:
Splicing works as normal:
>>> for dist in decorated_result_set[1:3]:
- ... print dist
+ ... print(dist)
Dist name is: gentoo
Dist name is: guadalinex
@@ -68,7 +68,7 @@ decorated results:
>>> decorated_result_set.config(return_both=True)
<lp.services.database.decoratedresultset.DecoratedResultSet object at ...>
>>> for dist in decorated_result_set:
- ... print dist
+ ... print(dist)
(<Distribution 'Debian' (debian)>, u'Dist name is: debian')
(<Distribution 'Gentoo' (gentoo)>, u'Dist name is: gentoo')
...
@@ -81,7 +81,7 @@ This works even if there are multiple levels:
>>> drs_squared = DecoratedResultSet(
... decorated_result_set, lambda x: len(x)).config(return_both=True)
>>> for dist in drs_squared:
- ... print dist
+ ... print(dist)
(<Distribution 'Debian' (debian)>, 20)
(<Distribution 'Gentoo' (gentoo)>, 20)
...
@@ -133,7 +133,7 @@ bulk_decorator argument permits operations to be performed over large
chunks of results at once.
>>> def all_ones(rows):
- ... print "that's a chunk of %d" % len(rows)
+ ... print("that's a chunk of %d" % len(rows))
... return (1 for row in rows)
>>> drs = DecoratedResultSet(results, bulk_decorator=all_ones)
>>> list(drs)
@@ -190,9 +190,9 @@ DecoratedResultSet can inform its hooks about slice data if slice_info=True is
passed.
>>> def pre_iter(rows, slice):
- ... print "pre iter", len(rows), slice.start, slice.stop
+ ... print("pre iter", len(rows), slice.start, slice.stop)
>>> def decorate(row, row_index):
- ... print "row", row.id, row_index
+ ... print("row", row.id, row_index)
>>> _ = result_set.order_by(Distribution.id)
>>> drs = DecoratedResultSet(
... result_set, decorate, pre_iter, slice_info=True)
diff --git a/lib/lp/services/database/doc/enumcol.txt b/lib/lp/services/database/doc/enumcol.txt
index d8915f9..ac5ad80 100644
--- a/lib/lp/services/database/doc/enumcol.txt
+++ b/lib/lp/services/database/doc/enumcol.txt
@@ -79,7 +79,7 @@ You cannot use integers or strings as EnumCol values:
...
TypeError: Not a DBItem: 2
- >>> t.foo = "TWO"
+ >>> t.foo = six.ensure_str("TWO")
Traceback (most recent call last):
...
TypeError: Not a DBItem: 'TWO'
diff --git a/lib/lp/services/database/doc/multitablecopy.txt b/lib/lp/services/database/doc/multitablecopy.txt
index d56dacd..83684de 100644
--- a/lib/lp/services/database/doc/multitablecopy.txt
+++ b/lib/lp/services/database/doc/multitablecopy.txt
@@ -70,7 +70,7 @@ in any old order. We must follow the list of tables we gave, in that order:
>>> numeric_holding_table = copier.getHoldingTableName('numeric')
>>> copier.extract('numeric', where_clause="n <= 2")
>>> cur.execute("SELECT count(*) FROM %s" % numeric_holding_table)
- >>> print cur.fetchall()[0][0]
+ >>> print(cur.fetchall()[0][0])
2
Since we haven't extracted all tables yet, we're not allowed to move to the
@@ -93,14 +93,14 @@ We now have two holding tables, one with some of the values from numeric, the
other with all values from textual:
>>> cur.execute("SELECT count(*) FROM %s" % numeric_holding_table)
- >>> print cur.fetchall()[0][0]
+ >>> print(cur.fetchall()[0][0])
2
>>> textual_holding_table = copier.getHoldingTableName('textual')
>>> cur.execute("SELECT count(*) FROM %s" % textual_holding_table)
- >>> print cur.fetchall()[0][0]
+ >>> print(cur.fetchall()[0][0])
3
- >>> print len(textual_values)
+ >>> print(len(textual_values))
3
Meanwhile we're still free to play with our original table, and manipulate the
@@ -133,14 +133,14 @@ We now see the extra data in the original tables:
>>> cur.execute("SELECT n FROM numeric ORDER BY n")
>>> for row in cur.fetchall():
- ... print row[0]
+ ... print(row[0])
1
2
3
4
5
>>> cur.execute("SELECT count(*) FROM textual")
- >>> print cur.fetchall()[0][0]
+ >>> print(cur.fetchall()[0][0])
7
And the holding tables are gone.
@@ -179,7 +179,7 @@ tables that are being copied.
... WHERE textual = textual.id
... ORDER BY n""")
>>> for numeric, textual in cur.fetchall():
- ... print numeric, textual
+ ... print(numeric, textual)
1 one
2 two
3 three
@@ -207,11 +207,11 @@ from numeric that referred to it into a holding table for numeric.
>>> cur.execute("SELECT t FROM %s" % textual_holding_table)
>>> for row in cur.fetchall():
- ... print row[0]
+ ... print(row[0])
many
>>> cur.execute("SELECT n FROM %s" % numeric_holding_table)
>>> for row in cur.fetchall():
- ... print row[0]
+ ... print(row[0])
5
6
7
@@ -226,7 +226,7 @@ from numeric that referred to it into a holding table for numeric.
... FROM numeric,textual
... WHERE numeric.textual=textual.id""")
>>> for numeric, textual in cur.fetchall():
- ... print numeric, textual
+ ... print(numeric, textual)
1 one
2 two
3 three
@@ -252,7 +252,7 @@ pass a where_clause argument of "false":
>>> cur = cursor()
>>> cur.execute(
... "SELECT count(*) FROM %s" % copier.getHoldingTableName('textual'))
- >>> print cur.fetchone()[0]
+ >>> print(cur.fetchone()[0])
0
After that, the table has been extracted and you can merrily proceed. Of
@@ -263,7 +263,7 @@ skipped table, they will not have any rows extracted either.
>>> cur.execute(
... "SELECT count(*) FROM %s" % copier.getHoldingTableName('numeric'))
- >>> print cur.fetchone()[0]
+ >>> print(cur.fetchone()[0])
0
>>> copier.dropHoldingTables()
@@ -293,7 +293,7 @@ up with incomplete data that should be deleted:
>>> cur.execute("SELECT t, count(*) FROM textual GROUP BY t ORDER BY t")
>>> for textual, count in cur.fetchall():
- ... print textual, count
+ ... print(textual, count)
lots 1
many 1
one 2
@@ -341,7 +341,7 @@ original words twice.
>>> cur.execute("SELECT t, count(*) FROM textual GROUP BY t ORDER BY t")
>>> for textual, count in cur.fetchall():
- ... print textual, count
+ ... print(textual, count)
lots 2
many 2
one 4
@@ -381,7 +381,7 @@ This time we run to completion without problems.
... LEFT JOIN textual tt on nt.textual = tt.id
... ORDER BY n""")
>>> for numeric, textual in cur.fetchall():
- ... print numeric, (textual or "null")
+ ... print(numeric, (textual or "null"))
1 one
2 two
3 three
@@ -413,7 +413,7 @@ This time we run to completion without problems.
... LEFT JOIN textual tt on nt.textual = tt.id
... ORDER BY n""")
>>> for numeric, textual in cur.fetchall():
- ... print numeric, (textual or "null")
+ ... print(numeric, (textual or "null"))
1 one
2 two
3 three
@@ -446,7 +446,7 @@ value also occurs in another table.
>>> holding_table = copier.getHoldingTableName('numeric')
>>> cur.execute("SELECT n FROM %s ORDER BY n" % holding_table)
>>> for number, in cur.fetchall():
- ... print number
+ ... print(number)
2
4
6
@@ -466,7 +466,7 @@ table names get uncomfortably long.
... external_joins=['double dub', 'double quad'])
>>> cur.execute("SELECT n FROM %s ORDER BY n" % holding_table)
>>> for number, in cur.fetchall():
- ... print number
+ ... print(number)
4
12
@@ -506,7 +506,7 @@ textual.
... has_id = "Yes"
... else:
... has_id = "No"
- ... print (textual or "null"), has_id
+ ... print((textual or "null"), has_id)
one Yes
two No
@@ -531,7 +531,7 @@ valid new_id in the textual holding table.
... FROM %s num JOIN %s AS text ON num.textual = text.new_id
... """ % (numeric_holding_table, textual_holding_table))
>>> for n, t in cur.fetchall():
- ... print n, t
+ ... print(n, t)
1 one
2 two
@@ -558,7 +558,7 @@ Only the non-inert extracted rows will be copied.
... ORDER BY t
... """)
>>> for t, count in cur.fetchall():
- ... print t, count
+ ... print(t, count)
one 2
However, all extracted rows of the referring table are copied, regardless of
@@ -572,7 +572,7 @@ whether they point to an inert or a non-inert row in the first table.
... ORDER BY n
... """)
>>> for n, count in cur.fetchall():
- ... print n, count
+ ... print(n, count)
1 2
2 2
@@ -596,10 +596,10 @@ being poured from and the name of the source table that data is being poured
back into.
>>> def textual_prepour(holding_table, source_table):
- ... print "Pouring textual"
+ ... print("Pouring textual")
>>> def numeric_prepour(holding_table, source_table):
- ... print "Pouring numeric"
+ ... print("Pouring numeric")
"Batch preparation" callbacks will be called at the beginning of every batch
of data that is poured. Each invocation runs in the same transaction as the
@@ -611,8 +611,8 @@ poured.
>>> def textual_batch(
... holding_table, source_table, batch_size, lowest_id, highest_id):
... """Print information about each batch of textual being poured."""
- ... print "Pouring text from %s to %s" % (
- ... holding_table, source_table)
+ ... print("Pouring text from %s to %s" % (
+ ... holding_table, source_table))
>>> copier = MultiTableCopy(
... 'test', ['textual', 'numeric'], minimum_batch_size=1)
@@ -626,8 +626,8 @@ other callbacks on other tables.
>>> def numeric_batch(
... holding_table, source_table, batch_size, lowest_id, highest_id):
... """Print information about each batch of numeric being poured."""
- ... print "Pouring numbers from %s to %s" % (
- ... holding_table, source_table)
+ ... print("Pouring numbers from %s to %s" % (
+ ... holding_table, source_table))
>>> copier.extract(
... 'numeric', joins=['textual'],
diff --git a/lib/lp/services/database/doc/security-proxies.txt b/lib/lp/services/database/doc/security-proxies.txt
index a0e2856..c1c1c85 100644
--- a/lib/lp/services/database/doc/security-proxies.txt
+++ b/lib/lp/services/database/doc/security-proxies.txt
@@ -48,7 +48,7 @@ DB schema objects should be comparable correctly when proxied...
>>> from lp.registry.interfaces.distroseries import IDistroSeriesSet
>>> from lp.registry.interfaces.series import SeriesStatus
>>> hoary = getUtility(IDistroSeriesSet).get(3)
- >>> print hoary.status.name
+ >>> print(hoary.status.name)
DEVELOPMENT
>>> hoary.status == SeriesStatus.DEVELOPMENT
True
diff --git a/lib/lp/services/database/doc/sqlbaseconnect.txt b/lib/lp/services/database/doc/sqlbaseconnect.txt
index c815b26..9a933c0 100644
--- a/lib/lp/services/database/doc/sqlbaseconnect.txt
+++ b/lib/lp/services/database/doc/sqlbaseconnect.txt
@@ -13,7 +13,7 @@ Ensure that lp.services.database.sqlbase connects as we expect.
... where = cur.fetchone()[0]
... cur.execute('SHOW transaction_isolation')
... how = cur.fetchone()[0]
- ... print 'Connected as %s to %s in %s isolation.' % (who, where, how)
+ ... print('Connected as %s to %s in %s isolation.' % (who, where, how))
Specifying the user connects as that user.
diff --git a/lib/lp/services/database/doc/storm-security-proxies.txt b/lib/lp/services/database/doc/storm-security-proxies.txt
index 740c1ef..54c1390 100644
--- a/lib/lp/services/database/doc/storm-security-proxies.txt
+++ b/lib/lp/services/database/doc/storm-security-proxies.txt
@@ -15,21 +15,21 @@ Get Mark's person and another person, wrapped in security proxies.
>>> mark = getUtility(IPersonSet).getByName('mark')
>>> spiv = getUtility(IPersonSet).getByName('spiv')
- >>> print type(mark)
+ >>> print(type(mark))
<type 'zope.security._proxy._Proxy'>
Get a bug task assigned to Mark. The bug task is also security-proxied.
>>> bugtask = BugTask.get(2)
- >>> print bugtask.assignee.name
+ >>> print(bugtask.assignee.name)
mark
- >>> print type(mark)
+ >>> print(type(mark))
<type 'zope.security._proxy._Proxy'>
Assign a different person as the assignee, and check that it worked by reading
it back, despite the security proxies.
>>> bugtask.transitionToAssignee(spiv)
- >>> print bugtask.assignee.name
+ >>> print(bugtask.assignee.name)
spiv
diff --git a/lib/lp/services/database/doc/storm-store-reset.txt b/lib/lp/services/database/doc/storm-store-reset.txt
index 564feb6..518e7c7 100644
--- a/lib/lp/services/database/doc/storm-store-reset.txt
+++ b/lib/lp/services/database/doc/storm-store-reset.txt
@@ -32,16 +32,16 @@ we rely on that to find out whether or not to reset stores.
... alive_items = len(IStore(Person)._alive)
>>> request_salgados_homepage()
- >>> print thread_name
+ >>> print(thread_name)
MainThread
- >>> print alive_items > 0
+ >>> print(alive_items > 0)
True
>>> from threading import Thread
>>> thread = Thread(target=request_salgados_homepage)
>>> thread.start()
>>> thread.join()
- >>> print thread_name != 'MainThread'
+ >>> print(thread_name != 'MainThread')
True
- >>> print alive_items
+ >>> print(alive_items)
0
diff --git a/lib/lp/services/database/doc/textsearching.txt b/lib/lp/services/database/doc/textsearching.txt
index a4253ac..f9e7b6c 100644
--- a/lib/lp/services/database/doc/textsearching.txt
+++ b/lib/lp/services/database/doc/textsearching.txt
@@ -38,7 +38,7 @@ against the database and display the results:
... fmt = '%%-%ds ' % colsize
... line += fmt % col
... line = line.rstrip()
- ... print line
+ ... print(line)
All queries against the full text indexes use the following basic syntax:
@@ -111,7 +111,7 @@ The following examples show the text version of the query using
... if compiled is not None:
... compiled = compiled.decode('UTF-8')
... compiled = compiled.encode('US-ASCII', 'backslashreplace')
- ... print '%s <=> %s' % (uncompiled, compiled)
+ ... print('%s <=> %s' % (uncompiled, compiled))
>>>
>>> def search(text_to_search, search_phrase):
... cur = cursor()
@@ -124,7 +124,7 @@ The following examples show the text version of the query using
... (text_to_search, search_phrase))
... match = cur.fetchall()[0][0]
... return "FTI data: %s query: %s match: %s" % (
- ... ts_vector, ts_query, str(match))
+ ... ts_vector, ts_query, six.text_type(match))
>>>
>>> def search_same(text):
... return search(text, text)
@@ -207,21 +207,21 @@ The implicit boolean operation is AND
'-' symbols are treated by the Postgres FTI parser context sensitive.
If they precede a word, they are removed.
- >>> print search_same('foo -bar')
+ >>> print(search_same('foo -bar'))
FTI data: 'bar':2 'foo':1
query: 'foo' & 'bar'
match: True
If a '-' precedes a number, it is retained.
- >>> print search_same('123 -456')
+ >>> print(search_same('123 -456'))
FTI data: '-456':2 '123':1
query: '123' & '-456'
match: True
Trailing '-' are always ignored.
- >>> print search_same('bar- 123-')
+ >>> print(search_same('bar- 123-'))
FTI data: '123':2 'bar':1
query: 'bar' & '123'
match: True
@@ -234,14 +234,14 @@ Repeated '-' are simply ignored by to_tsquery().
Hyphens surrounded by two words are retained. This reflects the way
how to_tsquery() and to_tsvector() handle such strings.
- >>> print search_same('foo-bar')
+ >>> print(search_same('foo-bar'))
FTI data: 'bar':3 'foo':2 'foo-bar':1
query: 'foo-bar' & 'foo' & 'bar'
match: True
A '-' surrounded by numbers is treated as the sign of the right-hand number.
- >>> print search_same('123-456')
+ >>> print(search_same('123-456'))
FTI data: '-456':2 '123':1
query: '123' & '-456'
match: True
@@ -250,9 +250,9 @@ Punctuation is handled consistently. If a string containing punctuation
appears in an FTI, it can also be passed to ftq(),and a search for this
string finds the indexed text.
- >>> punctuation = '\'"#$%*+,./:;<=>?@[\]^`{}~'
+ >>> punctuation = six.ensure_str('\'"#$%*+,./:;<=>?@[\]^`{}~')
>>> for symbol in punctuation:
- ... print repr(symbol), search_same('foo%sbar' % symbol)
+ ... print(repr(symbol), search_same('foo%sbar' % symbol))
"'" FTI data: 'bar':2 'foo':1 query: 'foo' & 'bar' match: True
'"' FTI data: 'bar':2 'foo':1 query: 'foo' & 'bar' match: True
'#' FTI data: 'bar':2 'foo':1 query: 'foo' & 'bar' match: True
@@ -280,7 +280,7 @@ string finds the indexed text.
'~' FTI data: 'foo':1 '~bar':2 query: 'foo' & '~bar' match: True
>>> for symbol in punctuation:
- ... print repr(symbol), search_same('aa %sbb%s cc' % (symbol, symbol))
+ ... print(repr(symbol), search_same('aa %sbb%s cc' % (symbol, symbol)))
"'" FTI data: 'aa':1 'bb':2 'cc':3 query: 'aa' & 'bb' & 'cc' match: True
'"' FTI data: 'aa':1 'bb':2 'cc':3 query: 'aa' & 'bb' & 'cc' match: True
'#' FTI data: 'aa':1 'bb':2 'cc':3 query: 'aa' & 'bb' & 'cc' match: True
@@ -318,12 +318,12 @@ Tags are simply dropped from the FTI data. The terms show up without
brackets in parsed queries as a consequence of phrase operator stripping
added for PostgreSQL 9.6.
- >>> print search('some text <div>whatever</div>', '<div>')
+ >>> print(search('some text <div>whatever</div>', '<div>'))
FTI data: 'text':2 'whatev':3 query: 'div' match: False
Of course, omitting '<' and '>'from the query does not help.
- >>> print search('some text <div>whatever</div>', 'div')
+ >>> print(search('some text <div>whatever</div>', 'div'))
FTI data: 'text':2 'whatev':3 query: 'div' match: False
The symbols '&', '|' and '!' are treated as operators by to_tsquery();
@@ -337,12 +337,12 @@ the search term.
>>> ftq('cool!')
cool <=> 'cool'
- >>> print search_same('Shell scripts usually start with #!/bin/sh.')
+ >>> print(search_same('Shell scripts usually start with #!/bin/sh.'))
FTI data: '/bin/sh':6 'script':2 'shell':1 'start':4 'usual':3
query: 'shell' & 'script' & 'usual' & 'start' & '/bin/sh'
match: True
- >>> print search_same('int foo = (bar & ! baz) | bla;')
+ >>> print(search_same('int foo = (bar & ! baz) | bla;'))
FTI data: 'bar':3 'baz':4 'bla':5 'foo':2 'int':1
query: 'int' & 'foo' & 'bar' & 'baz' & 'bla'
match: True
@@ -359,18 +359,18 @@ two symbols that are not tokenized and returns null.
Email addresses are retained as a whole, both by to_tsvector() and by
ftq().
- >>> print search_same('foo@xxxxxxx')
+ >>> print(search_same('foo@xxxxxxx'))
FTI data: 'foo@xxxxxxx':1 query: 'foo@xxxxxxx' match: True
File names are retained as a whole.
- >>> print search_same('foo-bar.txt')
+ >>> print(search_same('foo-bar.txt'))
FTI data: 'foo-bar.txt':1 query: 'foo-bar.txt' match: True
Some punctuation we pass through to tsearch2 for it to handle.
NB. This gets stemmed, see below.
- >>> print search_same("shouldn't")
+ >>> print(search_same("shouldn't"))
FTI data: 'shouldn':1 query: 'shouldn' match: True
Bug #44913 - Unicode characters in the wrong place.
@@ -383,35 +383,35 @@ Bug #44913 - Unicode characters in the wrong place.
Cut & Paste of 'Smart' quotes. Note that the quotation mark is retained
in the FTI.
- >>> print search_same(u'a-a\N{RIGHT DOUBLE QUOTATION MARK}')
+ >>> print(search_same(u'a-a\N{RIGHT DOUBLE QUOTATION MARK}'))
FTI data: 'a-a”':1 'a”':3 query: 'a-a”' & 'a”' match: True
- >>> print search_same(
+ >>> print(search_same(
... u'\N{LEFT SINGLE QUOTATION MARK}a.a'
- ... u'\N{RIGHT SINGLE QUOTATION MARK}')
+ ... u'\N{RIGHT SINGLE QUOTATION MARK}'))
FTI data: 'a’':2 '‘a':1 query: '‘a' & 'a’' match: True
Bug #44913 - Nothing but stopwords in a query needing repair
- >>> print search_same('a)a')
+ >>> print(search_same('a)a'))
FTI data: query: None match: None
Stop words (words deemed too common in English to search on) are removed
from queries by tsearch2.
- >>> print search_same("Don't do it harder!")
+ >>> print(search_same("Don't do it harder!"))
FTI data: 'harder':5 query: 'harder' match: True
Note that some queries will return None after compilation, because they
contained nothing but stop words or punctuation.
- >>> print search_same("don't do it!")
+ >>> print(search_same("don't do it!"))
FTI data: query: None match: None
- >>> print search_same(",,,")
+ >>> print(search_same(",,,"))
FTI data: query: None match: None
diff --git a/lib/lp/services/database/tests/decoratedresultset.txt b/lib/lp/services/database/tests/decoratedresultset.txt
index 41a0a70..30262c4 100644
--- a/lib/lp/services/database/tests/decoratedresultset.txt
+++ b/lib/lp/services/database/tests/decoratedresultset.txt
@@ -19,14 +19,14 @@ ResultSet:
>>> from zope.security.checker import ProxyFactory
>>> proxied_result_set = ProxyFactory(result_set)
- >>> print proxied_result_set
+ >>> print(proxied_result_set)
<security proxied storm.store.ResultSet ...>
>>> def result_decorator(distribution):
... return "Dist name is: %s" % distribution.name
>>> def pre_iter_hook(values):
- ... print len(values), "elements in result set"
+ ... print(len(values), "elements in result set")
>>> from lp.services.database.decoratedresultset import (
... DecoratedResultSet)
@@ -106,8 +106,8 @@ pre_iter_hook is not called from methods like first() or one() which return
at most one row:
>>> empty_result_set = decorated_result_set.copy()
- >>> print empty_result_set.config(
- ... offset=empty_result_set.count()).first()
+ >>> print(empty_result_set.config(
+ ... offset=empty_result_set.count()).first())
None
== last() ==
diff --git a/lib/lp/services/database/tests/test_decoratedresultset.py b/lib/lp/services/database/tests/test_decoratedresultset.py
index 5b2b400..7355caa 100644
--- a/lib/lp/services/database/tests/test_decoratedresultset.py
+++ b/lib/lp/services/database/tests/test_decoratedresultset.py
@@ -22,7 +22,7 @@ def test_suite():
test = LayeredDocFileSuite(
'decoratedresultset.txt',
- setUp=setUp, tearDown=tearDown,
+ setUp=lambda test: setUp(test, future=True), tearDown=tearDown,
layer=DatabaseFunctionalLayer)
suite.addTest(test)
return suite
diff --git a/lib/lp/services/database/tests/test_doc.py b/lib/lp/services/database/tests/test_doc.py
index bb85450..c5f1167 100644
--- a/lib/lp/services/database/tests/test_doc.py
+++ b/lib/lp/services/database/tests/test_doc.py
@@ -9,11 +9,14 @@ import os
from lp.services.testing import build_test_suite
from lp.testing.layers import DatabaseFunctionalLayer
+from lp.testing.systemdocs import setUp
here = os.path.dirname(os.path.realpath(__file__))
def test_suite():
- suite = build_test_suite(here, {}, layer=DatabaseFunctionalLayer)
+ suite = build_test_suite(
+ here, {}, setUp=lambda test: setUp(test, future=True),
+ layer=DatabaseFunctionalLayer)
return suite