← Back to team overview

launchpad-reviewers team mailing list archive

[Merge] ~cjwatson/launchpad:rename-slave-store into launchpad:master

 

Colin Watson has proposed merging ~cjwatson/launchpad:rename-slave-store into launchpad:master.

Commit message:
Rename ISlaveStore to IStandbyStore

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~cjwatson/launchpad/+git/launchpad/+merge/414253
-- 
Your team Launchpad code reviewers is requested to review the proposed merge of ~cjwatson/launchpad:rename-slave-store into launchpad:master.
diff --git a/cronscripts/librarian-feed-swift.py b/cronscripts/librarian-feed-swift.py
index ae60177..1cb1374 100755
--- a/cronscripts/librarian-feed-swift.py
+++ b/cronscripts/librarian-feed-swift.py
@@ -11,7 +11,7 @@ import os
 
 import six
 
-from lp.services.database.interfaces import ISlaveStore
+from lp.services.database.interfaces import IStandbyStore
 from lp.services.librarian.model import LibraryFileContent
 from lp.services.librarianserver import swift
 from lp.services.scripts.base import LaunchpadCronScript
@@ -75,14 +75,14 @@ class LibrarianFeedSwift(LaunchpadCronScript):
             remove = None
 
         if self.options.start_since:
-            self.options.start = ISlaveStore(LibraryFileContent).execute("""
+            self.options.start = IStandbyStore(LibraryFileContent).execute("""
                 SELECT MAX(id) FROM LibraryFileContent
                 WHERE datecreated < current_timestamp at time zone 'UTC'
                     - CAST(%s AS INTERVAL)
                 """, (six.text_type(self.options.start_since),)).get_one()[0]
 
         if self.options.end_at:
-            self.options.end = ISlaveStore(LibraryFileContent).execute("""
+            self.options.end = IStandbyStore(LibraryFileContent).execute("""
                 SELECT MAX(id) FROM LibraryFileContent
                 WHERE datecreated < current_timestamp at time zone 'UTC'
                     - CAST(%s AS INTERVAL)
diff --git a/cronscripts/translations-export-to-branch.py b/cronscripts/translations-export-to-branch.py
index 308c693..ca5d1dd 100755
--- a/cronscripts/translations-export-to-branch.py
+++ b/cronscripts/translations-export-to-branch.py
@@ -10,7 +10,7 @@ access hosted branches.
 
 Besides committing to branches, the script updates Branch records in the
 database, to let the branch scanner know that the branches' contents
-have been updated.  For the rest, the script talks to the slave store.
+have been updated.  For the rest, the script talks to the standby store.
 """
 
 __all__ = []
diff --git a/lib/lp/app/stories/basics/xx-dbpolicy.txt b/lib/lp/app/stories/basics/xx-dbpolicy.txt
index b0b8e21..c4ffa89 100644
--- a/lib/lp/app/stories/basics/xx-dbpolicy.txt
+++ b/lib/lp/app/stories/basics/xx-dbpolicy.txt
@@ -4,11 +4,11 @@ Application Server Database Policy
 The database policy chooses the default Storm store to used. Its goal
 is to distribute load away from the master databases to read only
 stores where possible. It will benefit old code - new code should
-explicitly select objects from the master or slave stores as needed.
+explicitly select objects from the master or standby stores as needed.
 
-To test this policy, lets point the MAIN SLAVE store to a Launchpad
+To test this policy, lets point the MAIN STANDBY store to a Launchpad
 database with a different name. This makes it easy to check if a
-request is querying the master or slave database.
+request is querying the master or standby database.
 
     >>> from lp.services.config import config
     >>> from textwrap import dedent
@@ -19,21 +19,24 @@ request is querying the master or slave database.
     >>> config.push('empty_standby', config_overlay)
 
     >>> from lp.registry.model.person import Person
-    >>> from lp.services.database.interfaces import IMasterStore, ISlaveStore
+    >>> from lp.services.database.interfaces import (
+    ...     IMasterStore,
+    ...     IStandbyStore,
+    ...     )
     >>> from lp.testing.layers import DatabaseLayer
     >>> master = IMasterStore(Person)
     >>> dbname = DatabaseLayer._db_fixture.dbname
     >>> dbname == master.execute("SELECT current_database()").get_one()[0]
     True
-    >>> slave = ISlaveStore(Person)
-    >>> print(slave.execute("SELECT current_database()").get_one()[0])
+    >>> standby = IStandbyStore(Person)
+    >>> print(standby.execute("SELECT current_database()").get_one()[0])
     launchpad_empty
 
 We should confirm that the empty database is as empty as we hope it is.
 
-    >>> slave_store = ISlaveStore(Person)
+    >>> standby_store = IStandbyStore(Person)
     >>> master_store = IMasterStore(Person)
-    >>> slave_store.find(Person).is_empty()
+    >>> standby_store.find(Person).is_empty()
     True
     >>> master_store.find(Person).is_empty()
     False
@@ -46,16 +49,16 @@ needs to be created externally to this pagetest).
     ...     if dbname == DatabaseLayer._db_fixture.dbname:
     ...         return 'MASTER'
     ...     elif dbname == 'launchpad_empty':
-    ...         return 'SLAVE'
+    ...         return 'STANDBY'
     ...     else:
     ...         return 'UNKNOWN'
 
-Read only requests such as GET and HEAD will use the MAIN SLAVE
+Read only requests such as GET and HEAD will use the MAIN STANDBY
 Store by default.
 
     >>> browser.open('http://launchpad.test/+whichdb')
     >>> print(whichdb(browser))
-    SLAVE
+    STANDBY
 
 POST requests might make updates, so they use the MAIN MASTER
 Store by default.
@@ -66,18 +69,18 @@ Store by default.
 
 This is an unauthenticated browser.  These typically have no session, unless
 special dispensation has been made. Without a session, subsequent requests
-will then immediately return to using the SLAVE.
+will then immediately return to using the STANDBY.
 
     >>> browser.open('http://launchpad.test/+whichdb')
     >>> print(whichdb(browser))
-    SLAVE
+    STANDBY
 
 However, if the request has a session (that is, is authenticated; or is
 unauthenticated, but under special dispensation to have a session), once a
 POST request has been made, further GET and HEAD requests from the same client
 continue to use the MAIN MASTER Store by default for 5 minutes. This ensures
 that a user will see any changes they have made immediately, even though the
-slave databases may lag some time behind the master database.
+standby databases may lag some time behind the master database.
 
     >>> browser.addHeader('Authorization', 'Basic mark@xxxxxxxxxxx:test')
     >>> browser.getControl('Do Post').click() # POST request
@@ -88,19 +91,19 @@ slave databases may lag some time behind the master database.
     MASTER
 
 GET and HEAD requests from other clients are unaffected though
-and use the MAIN SLAVE Store by default.
+and use the MAIN STANDBY Store by default.
 
     >>> anon_browser.open('http://launchpad.test/+whichdb')
     >>> print(whichdb(anon_browser))
-    SLAVE
+    STANDBY
     >>> admin_browser.open('http://launchpad.test/+whichdb')
     >>> print(whichdb(admin_browser))
-    SLAVE
+    STANDBY
 
 If no more POST requests are made for 5 minutes, GET and HEAD
-requests will once again be using the MAIN SLAVE store as we
+requests will once again be using the MAIN STANDBY store as we
 can assume that any changes made to the master database have
-propagated to the slaves.
+propagated to the standbys.
 
 To test this, first we need to wind forward the database policy's clock.
 
@@ -119,18 +122,18 @@ To test this, first we need to wind forward the database policy's clock.
 
     >>> browser.open('http://launchpad.test/+whichdb')
     >>> print(whichdb(browser))
-    SLAVE
+    STANDBY
 
     >>> dbpolicy._now = _original_now # Reset the time machine.
 
 
-When lag gets too bad, we stop using slave databases. This stops
+When lag gets too bad, we stop using standby databases. This stops
 replication oddities from becoming too bad, as well as lightening the load
-on the slaves allowing them to catch up.
+on the standbys allowing them to catch up.
 
     >>> anon_browser.open('http://launchpad.test/+whichdb')
     >>> print(whichdb(anon_browser))
-    SLAVE
+    STANDBY
 
     >>> dbpolicy._test_lag = timedelta(minutes=10)
     >>> anon_browser.open('http://launchpad.test/+whichdb')
@@ -139,7 +142,7 @@ on the slaves allowing them to catch up.
     >>> dbpolicy._test_lag = None
 
 
-A 404 error page is shown when code raises a LookupError. If a slave
+A 404 error page is shown when code raises a LookupError. If a standby
 database is being used, this might have been caused by replication lag
 if the missing data was only recently created. To fix this surprising
 error, requests are always retried using the master database before
@@ -148,12 +151,12 @@ returning a 404 error to the user.
     >>> anon_browser.handleErrors = True
     >>> anon_browser.raiseHttpErrors = False
 
-    # Confirm requests are going to the SLAVE
+    # Confirm requests are going to the STANDBY
     >>> anon_browser.open('http://launchpad.test/+whichdb')
     >>> print(whichdb(anon_browser))
-    SLAVE
+    STANDBY
 
-    # The slave database contains no data, but we don't get
+    # The standby database contains no data, but we don't get
     # a 404 page - the request is retried against the MASTER.
     >>> anon_browser.open('http://launchpad.test/~stub')
     >>> anon_browser.headers['Status']
@@ -165,10 +168,10 @@ returning a 404 error to the user.
     >>> anon_browser.headers['Status']
     '404 Not Found'
 
-    # This session is still using the SLAVE though by default.
+    # This session is still using the STANDBY though by default.
     >>> anon_browser.open('http://launchpad.test/+whichdb')
     >>> print(whichdb(anon_browser))
-    SLAVE
+    STANDBY
 
 Reset our config to avoid affecting other tests.
 
diff --git a/lib/lp/bugs/scripts/bugtasktargetnamecaches.py b/lib/lp/bugs/scripts/bugtasktargetnamecaches.py
index d6cc18d..3d741fc 100644
--- a/lib/lp/bugs/scripts/bugtasktargetnamecaches.py
+++ b/lib/lp/bugs/scripts/bugtasktargetnamecaches.py
@@ -21,7 +21,7 @@ from lp.registry.model.productseries import ProductSeries
 from lp.registry.model.sourcepackagename import SourcePackageName
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     )
 from lp.services.looptuner import (
     ITunableLoop,
@@ -62,7 +62,7 @@ class BugTaskTargetNameCachesTunableLoop:
         Returns a list of (target, set_of_cached_names) pairs, where target is
         a tuple of IDs from the columns in target_columns.
         """
-        store = ISlaveStore(BugTask)
+        store = IStandbyStore(BugTask)
         candidate_set = store.find(target_columns).config(distinct=True)
         candidates = defaultdict(set)
         for candidate in candidate_set:
diff --git a/lib/lp/buildmaster/model/builder.py b/lib/lp/buildmaster/model/builder.py
index eeef109..1fd92ec 100644
--- a/lib/lp/buildmaster/model/builder.py
+++ b/lib/lp/buildmaster/model/builder.py
@@ -50,7 +50,7 @@ from lp.services.database.constants import UTC_NOW
 from lp.services.database.decoratedresultset import DecoratedResultSet
 from lp.services.database.enumcol import DBEnum
 from lp.services.database.interfaces import (
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.database.stormbase import StormBase
@@ -300,7 +300,7 @@ class BuilderSet:
 
     def getBuildQueueSizes(self):
         """See `IBuilderSet`."""
-        results = ISlaveStore(BuildQueue).find((
+        results = IStandbyStore(BuildQueue).find((
             Count(),
             Sum(BuildQueue.estimated_duration),
             Processor,
diff --git a/lib/lp/code/model/branchcloud.py b/lib/lp/code/model/branchcloud.py
index e6969f3..000a1ad 100644
--- a/lib/lp/code/model/branchcloud.py
+++ b/lib/lp/code/model/branchcloud.py
@@ -29,7 +29,7 @@ from zope.interface import provider
 from lp.code.interfaces.branch import IBranchCloud
 from lp.code.model.revision import RevisionCache
 from lp.registry.model.product import Product
-from lp.services.database.interfaces import ISlaveStore
+from lp.services.database.interfaces import IStandbyStore
 
 
 @provider(IBranchCloud)
@@ -44,8 +44,8 @@ class BranchCloud:
         commits = Alias(Count(RevisionCache.revision_id))
         epoch = datetime.now(pytz.UTC) - timedelta(days=30)
         # It doesn't matter if this query is even a whole day out of date, so
-        # use the slave store.
-        result = ISlaveStore(RevisionCache).find(
+        # use the standby store.
+        result = IStandbyStore(RevisionCache).find(
             (Product.name,
              commits,
              Count(distinct_revision_author),
diff --git a/lib/lp/code/model/tests/test_branchcloud.py b/lib/lp/code/model/tests/test_branchcloud.py
index 4fb7143..2af246f 100644
--- a/lib/lp/code/model/tests/test_branchcloud.py
+++ b/lib/lp/code/model/tests/test_branchcloud.py
@@ -37,8 +37,9 @@ class TestBranchCloud(TestCaseWithFactory):
 
     def getProductsWithInfo(self, num_products=None):
         """Get product cloud information."""
-        # Since we use the slave store to get the information, we need to
-        # commit the transaction to make the information visible to the slave.
+        # Since we use the standby store to get the information, we need to
+        # commit the transaction to make the information visible to the
+        # standby.
         transaction.commit()
         cloud_info = self._branch_cloud.getProductsWithInfo(num_products)
 
diff --git a/lib/lp/registry/scripts/teamparticipation.py b/lib/lp/registry/scripts/teamparticipation.py
index cfca76a..cb15dff 100644
--- a/lib/lp/registry/scripts/teamparticipation.py
+++ b/lib/lp/registry/scripts/teamparticipation.py
@@ -30,7 +30,7 @@ from lp.registry.interfaces.teammembership import ACTIVE_STATES
 from lp.registry.model.teammembership import TeamParticipation
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     )
 from lp.services.database.sqlbase import (
     quote,
@@ -52,7 +52,7 @@ def check_teamparticipation_circular(log):
            AND tp.person = tp2.team
            AND tp.id != tp2.id;
         """
-    circular_references = list(ISlaveStore(TeamParticipation).execute(query))
+    circular_references = list(IStandbyStore(TeamParticipation).execute(query))
     if len(circular_references) > 0:
         raise LaunchpadScriptFailure(
             "Circular references found: %s" % circular_references)
@@ -93,7 +93,7 @@ def execute_long_query(store, log, interval, query):
 def fetch_team_participation_info(log):
     """Fetch people, teams, memberships and participations."""
     slurp = partial(
-        execute_long_query, ISlaveStore(TeamParticipation), log, 10000)
+        execute_long_query, IStandbyStore(TeamParticipation), log, 10000)
 
     people = dict(
         slurp(
diff --git a/lib/lp/services/database/doc/db-policy.txt b/lib/lp/services/database/doc/db-policy.txt
index 8a1cce2..fbb9aae 100644
--- a/lib/lp/services/database/doc/db-policy.txt
+++ b/lib/lp/services/database/doc/db-policy.txt
@@ -31,8 +31,8 @@ of the primary, the better the overall performance of Launchpad will be.
 We can distribute this load over many standby databases but are limited to
 a single primary.
 
-    >>> from lp.services.database.interfaces import ISlaveStore
-    >>> ro_janitor = ISlaveStore(Person).find(
+    >>> from lp.services.database.interfaces import IStandbyStore
+    >>> ro_janitor = IStandbyStore(Person).find(
     ...     Person, Person.name == 'janitor').one()
     >>> ro_janitor is writable_janitor
     False
diff --git a/lib/lp/services/database/doc/storm-store-reset.txt b/lib/lp/services/database/doc/storm-store-reset.txt
index 8380370..b22a9a6 100644
--- a/lib/lp/services/database/doc/storm-store-reset.txt
+++ b/lib/lp/services/database/doc/storm-store-reset.txt
@@ -28,7 +28,7 @@ we rely on that to find out whether or not to reset stores.
     ...     http = setupBrowser(auth="Basic foo.bar@xxxxxxxxxxxxx:test")
    ...     http.open("http://launchpad.test/~salgado/+edit")
     ...     http.getControl("Display Name").value = "Changed"
-    ...     # Need a POST or the DB policy will be using the slave.
+    ...     # Need a POST or the DB policy will be using the standby.
     ...     http.getControl("Save Changes").click()
     ...     alive_items = len(IStore(Person)._alive)
 
diff --git a/lib/lp/services/database/doc/storm.txt b/lib/lp/services/database/doc/storm.txt
index 82823a5..2690d05 100644
--- a/lib/lp/services/database/doc/storm.txt
+++ b/lib/lp/services/database/doc/storm.txt
@@ -4,7 +4,7 @@ back into the main replication set as part of login server separation.
 -- StuartBishop 20100222
 
 In addition to what Storm provides, we also have some Launchpad
-specific Storm tools to cope with our master and slave store arrangement.
+specific Storm tools to cope with our master and standby store arrangement.
 
     >>> from lp.services.identity.interfaces.emailaddress import (
     ...     EmailAddressStatus,
@@ -13,7 +13,7 @@ specific Storm tools to cope with our master and slave store arrangement.
     >>> from lp.services.database.interfaces import (
     ...     IMasterObject,
     ...     IMasterStore,
-    ...     ISlaveStore,
+    ...     IStandbyStore,
     ...     IStore,
     ...     )
     >>> from lp.services.identity.model.emailaddress import EmailAddress
@@ -33,33 +33,33 @@ provides.
 
     >>> IMasterStore.providedBy(main_master)
     True
-    >>> ISlaveStore.providedBy(main_master)
+    >>> IStandbyStore.providedBy(main_master)
     False
 
 
-Changes to the slave Stores will lag behind the master Stores. If
+Changes to the standby Stores will lag behind the master Stores. If
 you only need to read an object but require it to be in sync with the
 master, you should use the default Store. Launchpad will give you the
-slave store if it is sure all your recent changes have been replicated.
+standby store if it is sure all your recent changes have been replicated.
 Otherwise, it gives you the master. See IStoreSelector for details.
 
     >>> main_default = IStore(Person)
-    >>> main_slave = ISlaveStore(Person)
+    >>> main_standby = IStandbyStore(Person)
     >>> main_default is main_master
     True
-    >>> main_default is main_slave
+    >>> main_default is main_standby
     False
 
 
 You can also adapt database object instances to Stores, although
 this is less generally useful.
 
-    >>> janitor = ISlaveStore(Person).find(Person, name='janitor').one()
-    >>> ISlaveStore(janitor) is ISlaveStore(Person)
+    >>> janitor = IStandbyStore(Person).find(Person, name='janitor').one()
+    >>> IStandbyStore(janitor) is IStandbyStore(Person)
     True
     >>> IMasterStore(janitor) is IMasterStore(Person)
     True
-    >>> IMasterStore(janitor) is ISlaveStore(Person)
+    >>> IMasterStore(janitor) is IStandbyStore(Person)
     False
 
 
@@ -68,9 +68,9 @@ Good defensive programming is to use this adapter if you want to make
 changes to an object, just in case you have been passed an instance
 from a store other than the correct Master.
 
-    >>> main_slave = ISlaveStore(Person)
+    >>> main_standby = IStandbyStore(Person)
     >>> t = transaction.begin()
-    >>> person = main_slave.find(Person, name='mark').one()
+    >>> person = main_standby.find(Person, name='mark').one()
     >>> person.display_name = 'Cannot change'
     >>> transaction.commit()
     Traceback (most recent call last):
@@ -79,7 +79,7 @@ from a store other than the correct Master.
 
     >>> transaction.abort()
     >>> t = transaction.begin()
-    >>> person = main_slave.find(Person, name='mark').one()
+    >>> person = main_standby.find(Person, name='mark').one()
     >>> IMasterObject(person).display_name = 'Can change'
     >>> transaction.commit()
 
@@ -121,14 +121,14 @@ stores.
     >>> master_email = IMasterStore(EmailAddress).find(
     ...     EmailAddress, Person.name == 'janitor',
     ...     EmailAddress.person==Person.id).one()
-    >>> slave_email = ISlaveStore(EmailAddress).find(
+    >>> standby_email = IStandbyStore(EmailAddress).find(
     ...     EmailAddress, Person.name == 'janitor',
     ...     EmailAddress.person==Person.id).one()
-    >>> master_email is slave_email
+    >>> master_email is standby_email
     False
-    >>> master_email == slave_email
+    >>> master_email == standby_email
     True
-    >>> master_email != slave_email
+    >>> master_email != standby_email
     False
 
 Comparison works for security wrapped objects too.
diff --git a/lib/lp/services/database/interfaces.py b/lib/lp/services/database/interfaces.py
index 5e89515..aae6350 100644
--- a/lib/lp/services/database/interfaces.py
+++ b/lib/lp/services/database/interfaces.py
@@ -9,8 +9,8 @@ __all__ = [
     'IMasterObject',
     'IMasterStore',
     'IRequestExpired',
-    'ISlaveStore',
     'ISQLBase',
+    'IStandbyStore',
     'IStore',
     'IStoreSelector',
     'MAIN_STORE',
@@ -153,7 +153,7 @@ class IMasterStore(IStore):
     """A writeable Storm Stores."""
 
 
-class ISlaveStore(IStore):
+class IStandbyStore(IStore):
     """A read-only Storm Store."""
 
 
diff --git a/lib/lp/services/database/policy.py b/lib/lp/services/database/policy.py
index a50fe3d..8d8f4e3 100644
--- a/lib/lp/services/database/policy.py
+++ b/lib/lp/services/database/policy.py
@@ -44,7 +44,7 @@ from lp.services.database.interfaces import (
     DisallowedStore,
     IDatabasePolicy,
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     IStoreSelector,
     MAIN_STORE,
     PRIMARY_FLAVOR,
@@ -158,7 +158,7 @@ class BaseDatabasePolicy:
             if flavor == PRIMARY_FLAVOR:
                 alsoProvides(store, IMasterStore)
             else:
-                alsoProvides(store, ISlaveStore)
+                alsoProvides(store, IStandbyStore)
 
             store._lp_store_initialized = True
 
diff --git a/lib/lp/services/database/tests/test_bulk.py b/lib/lp/services/database/tests/test_bulk.py
index a90d11e..78981ae 100644
--- a/lib/lp/services/database/tests/test_bulk.py
+++ b/lib/lp/services/database/tests/test_bulk.py
@@ -30,7 +30,7 @@ from lp.registry.model.person import Person
 from lp.services.database import bulk
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.database.sqlbase import (
@@ -131,13 +131,13 @@ class TestLoaders(TestCaseWithFactory):
         db_object = self.factory.makeComponent()
         db_object_type = bulk.get_type(db_object)
         # Commit so the database object is available in both master
-        # and slave stores.
+        # and standby stores.
         transaction.commit()
         # Use a list, since objects corresponding to the same DB row from
         # different stores compare equal.
         db_objects = [
             IMasterStore(db_object).get(db_object_type, db_object.id),
-            ISlaveStore(db_object).get(db_object_type, db_object.id),
+            IStandbyStore(db_object).get(db_object_type, db_object.id),
             ]
         db_object_ids = {id(obj) for obj in db_objects}
         db_queries = list(bulk.gen_reload_queries(db_objects))
@@ -234,7 +234,7 @@ class TestLoaders(TestCaseWithFactory):
         # load() can use an alternative store.
         db_object = self.factory.makeComponent()
         # Commit so the database object is available in both master
-        # and slave stores.
+        # and standby stores.
         transaction.commit()
         # Master store.
         master_store = IMasterStore(db_object)
@@ -242,12 +242,12 @@ class TestLoaders(TestCaseWithFactory):
             Component, [db_object.id], store=master_store)
         self.assertEqual(
             Store.of(db_object_from_master), master_store)
-        # Slave store.
-        slave_store = ISlaveStore(db_object)
-        [db_object_from_slave] = bulk.load(
-            Component, [db_object.id], store=slave_store)
+        # Standby store.
+        standby_store = IStandbyStore(db_object)
+        [db_object_from_standby] = bulk.load(
+            Component, [db_object.id], store=standby_store)
         self.assertEqual(
-            Store.of(db_object_from_slave), slave_store)
+            Store.of(db_object_from_standby), standby_store)
 
     def test_load_related(self):
         owning_objects = [
diff --git a/lib/lp/services/database/transaction_policy.py b/lib/lp/services/database/transaction_policy.py
index bb1b403..8766894 100644
--- a/lib/lp/services/database/transaction_policy.py
+++ b/lib/lp/services/database/transaction_policy.py
@@ -24,7 +24,7 @@ class DatabaseTransactionPolicy:
 
         # We want to be sure that inspect_data does not inadvertently
         # make any changes in the database, but we can't run it on the
-        # slave store because it doesn't tolerate replication lag.
+        # standby store because it doesn't tolerate replication lag.
         with DatabaseTransactionPolicy(read_only=True):
             inspect_data()
 
@@ -69,7 +69,7 @@ class DatabaseTransactionPolicy:
         writability of database transactions.
 
         :param store: The store to set policy on.  Defaults to the main master
-            store.  You don't want to use this on a slave store!
+            store.  You don't want to use this on a standby store!
         :param read_only: Is this policy read-only?
         """
         self.read_only = read_only
diff --git a/lib/lp/services/librarian/tests/test_client.py b/lib/lp/services/librarian/tests/test_client.py
index cb054a9..6bef538 100644
--- a/lib/lp/services/librarian/tests/test_client.py
+++ b/lib/lp/services/librarian/tests/test_client.py
@@ -25,7 +25,7 @@ import transaction
 
 from lp.services.config import config
 from lp.services.daemons.tachandler import TacTestSetup
-from lp.services.database.interfaces import ISlaveStore
+from lp.services.database.interfaces import IStandbyStore
 from lp.services.database.policy import StandbyDatabasePolicy
 from lp.services.database.sqlbase import block_implicit_flushes
 from lp.services.librarian import client as client_module
@@ -299,7 +299,7 @@ class LibrarianClientTestCase(TestCase):
         # standby store and try to add a file, verifying that the primary
         # is used.
         client = LibrarianClient()
-        ISlaveStore(LibraryFileAlias).close()
+        IStandbyStore(LibraryFileAlias).close()
         with StandbyDatabasePolicy():
             alias_id = client.addFile(
                 'sample.txt', 6, io.BytesIO(b'sample'), 'text/plain')
diff --git a/lib/lp/services/librarianserver/swift.py b/lib/lp/services/librarianserver/swift.py
index 637110f..ddb3125 100644
--- a/lib/lp/services/librarianserver/swift.py
+++ b/lib/lp/services/librarianserver/swift.py
@@ -24,7 +24,7 @@ from six.moves.urllib.parse import quote
 from swiftclient import client as swiftclient
 
 from lp.services.config import config
-from lp.services.database.interfaces import ISlaveStore
+from lp.services.database.interfaces import IStandbyStore
 from lp.services.librarian.model import LibraryFileContent
 
 
@@ -143,7 +143,7 @@ def to_swift(log, start_lfc_id=None, end_lfc_id=None,
 
             log.debug('Found {} ({})'.format(lfc, filename))
 
-            if ISlaveStore(LibraryFileContent).get(
+            if IStandbyStore(LibraryFileContent).get(
                     LibraryFileContent, lfc) is None:
                 log.info("{} exists on disk but not in the db".format(
                     lfc))
@@ -205,7 +205,7 @@ def _put(log, swift_connection, lfc_id, container, obj_name, fs_path):
     fs_size = os.path.getsize(fs_path)
     fs_file = HashStream(open(fs_path, 'rb'))
 
-    db_md5_hash = ISlaveStore(LibraryFileContent).get(
+    db_md5_hash = IStandbyStore(LibraryFileContent).get(
         LibraryFileContent, lfc_id).md5
 
     assert hasattr(fs_file, 'tell') and hasattr(fs_file, 'seek'), '''
diff --git a/lib/lp/services/oauth/model.py b/lib/lp/services/oauth/model.py
index 3b48de6..2c07b19 100644
--- a/lib/lp/services/oauth/model.py
+++ b/lib/lp/services/oauth/model.py
@@ -68,7 +68,7 @@ class OAuthBase:
         """Return the correct store for this class.
 
         We want all OAuth classes to be retrieved from the master flavour.  If
-        they are retrieved from the slave, there will be problems in the
+        they are retrieved from the standby, there will be problems in the
         authorization exchange, since it will be done across applications that
         won't share the session cookies.
         """
diff --git a/lib/lp/services/session/adapters.py b/lib/lp/services/session/adapters.py
index 1cc2767..e2bc15b 100644
--- a/lib/lp/services/session/adapters.py
+++ b/lib/lp/services/session/adapters.py
@@ -11,7 +11,7 @@ from zope.interface import implementer
 
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.database.sqlbase import session_store
@@ -26,9 +26,9 @@ def session_master_store(cls):
 
 
 @adapter(IUseSessionStore)
-@implementer(ISlaveStore)
-def session_slave_store(cls):
-    """Adapt a Session database object to an `ISlaveStore`."""
+@implementer(IStandbyStore)
+def session_standby_store(cls):
+    """Adapt a Session database object to an `IStandbyStore`."""
     return session_store()
 
 
diff --git a/lib/lp/services/session/configure.zcml b/lib/lp/services/session/configure.zcml
index dea1cd8..8c3c399 100644
--- a/lib/lp/services/session/configure.zcml
+++ b/lib/lp/services/session/configure.zcml
@@ -7,6 +7,6 @@
    xmlns:i18n="http://namespaces.zope.org/i18n"
     i18n_domain="launchpad">
     <adapter factory=".adapters.session_master_store" />
-    <adapter factory=".adapters.session_slave_store" />
+    <adapter factory=".adapters.session_standby_store" />
     <adapter factory=".adapters.session_default_store" />
 </configure>
diff --git a/lib/lp/services/session/tests/test_session.py b/lib/lp/services/session/tests/test_session.py
index 6f5c1c0..10cab73 100644
--- a/lib/lp/services/session/tests/test_session.py
+++ b/lib/lp/services/session/tests/test_session.py
@@ -5,7 +5,7 @@
 
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.session.model import (
@@ -20,7 +20,7 @@ class TestSessionModelAdapters(TestCase):
     layer = DatabaseFunctionalLayer
 
     def test_adapters(self):
-        for adapter in [IMasterStore, ISlaveStore, IStore]:
+        for adapter in [IMasterStore, IStandbyStore, IStore]:
             for cls in [SessionData, SessionPkgData]:
                 for obj in [cls, cls()]:
                     store = adapter(obj)
diff --git a/lib/lp/services/webapp/adapter.py b/lib/lp/services/webapp/adapter.py
index 26b4b6d..da15bfa 100644
--- a/lib/lp/services/webapp/adapter.py
+++ b/lib/lp/services/webapp/adapter.py
@@ -750,7 +750,7 @@ class StoreSelector:
 
 
 # We want to be able to adapt a Storm class to an IStore, IMasterStore or
-# ISlaveStore. Unfortunately, the component architecture provides no
+# IStandbyStore. Unfortunately, the component architecture provides no
 # way for us to declare that a class, and all its subclasses, provides
 # a given interface. This means we need to use an global adapter.
 
@@ -768,8 +768,8 @@ def get_master_store(storm_class):
     return get_store(storm_class, PRIMARY_FLAVOR)
 
 
-def get_slave_store(storm_class):
-    """Return the master Store for the given database class."""
+def get_standby_store(storm_class):
+    """Return the standby Store for the given database class."""
     return get_store(storm_class, STANDBY_FLAVOR)
 
 
diff --git a/lib/lp/services/webapp/batching.py b/lib/lp/services/webapp/batching.py
index 894e6a6..b4c3b2b 100644
--- a/lib/lp/services/webapp/batching.py
+++ b/lib/lp/services/webapp/batching.py
@@ -36,7 +36,7 @@ from zope.security.proxy import (
 from lp.app.browser.launchpad import iter_view_registrations
 from lp.services.config import config
 from lp.services.database.decoratedresultset import DecoratedResultSet
-from lp.services.database.interfaces import ISlaveStore
+from lp.services.database.interfaces import IStandbyStore
 from lp.services.database.sqlbase import (
     convert_storm_clause_to_string,
     sqlvalues,
@@ -621,7 +621,7 @@ class StormRangeFactory:
         select = removeSecurityProxy(self.plain_resultset).get_select_expr(
             *columns)
         explain = 'EXPLAIN ' + convert_storm_clause_to_string(select)
-        result = ISlaveStore(LibraryFileAlias).execute(explain)
+        result = IStandbyStore(LibraryFileAlias).execute(explain)
         _rows_re = re.compile(r"rows=(\d+)\swidth=")
         first_line = result.get_one()[0]
         match = _rows_re.search(first_line)
diff --git a/lib/lp/services/webapp/database.zcml b/lib/lp/services/webapp/database.zcml
index 2d9fcd1..c283ee1 100644
--- a/lib/lp/services/webapp/database.zcml
+++ b/lib/lp/services/webapp/database.zcml
@@ -46,9 +46,9 @@
         factory="lp.services.webapp.adapter.get_master_store"
         />
     <adapter
-        provides="lp.services.database.interfaces.ISlaveStore"
+        provides="lp.services.database.interfaces.IStandbyStore"
         for="zope.interface.Interface"
-        factory="lp.services.webapp.adapter.get_slave_store"
+        factory="lp.services.webapp.adapter.get_standby_store"
         />
     <!-- Universal adapter needed here per Bug #591841.
          We have no way of specifying that all subclasses of
diff --git a/lib/lp/services/webapp/doc/test_adapter_permissions.txt b/lib/lp/services/webapp/doc/test_adapter_permissions.txt
index 2ae057f..35d35e3 100644
--- a/lib/lp/services/webapp/doc/test_adapter_permissions.txt
+++ b/lib/lp/services/webapp/doc/test_adapter_permissions.txt
@@ -5,7 +5,7 @@ traversed to from a PRIMARY_FLAVOR store.
 Because our development environment is not replicated, we use database
 permissions to ensure that tables we should not be writing to cannot
 be written to. The same permissions structure is also used on production,
-so the Slony-I triggers blocking writes to some tables will never
+so the Slony-I triggers blocking writes to replicated tables will never
 actually be invoked.
 
     >>> from lp.registry.model.person import Person
diff --git a/lib/lp/services/webapp/publication.py b/lib/lp/services/webapp/publication.py
index 31079d6..80effc1 100644
--- a/lib/lp/services/webapp/publication.py
+++ b/lib/lp/services/webapp/publication.py
@@ -703,7 +703,7 @@ class LaunchpadBrowserPublication(
                 return False
 
             # If we get a LookupError and the default database being
-            # used is a replica, raise a Retry exception instead of
+            # used is a standby, raise a Retry exception instead of
             # returning the 404 error page. We do this in case the
             # LookupError is caused by replication lag. Our database
             # policy forces the use of the primary database for retries.
diff --git a/lib/lp/services/webapp/tests/test_dbpolicy.py b/lib/lp/services/webapp/tests/test_dbpolicy.py
index 8855268..a2a8120 100644
--- a/lib/lp/services/webapp/tests/test_dbpolicy.py
+++ b/lib/lp/services/webapp/tests/test_dbpolicy.py
@@ -36,7 +36,7 @@ from lp.services.database.interfaces import (
     DisallowedStore,
     IDatabasePolicy,
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     IStoreSelector,
     MAIN_STORE,
     PRIMARY_FLAVOR,
@@ -110,7 +110,7 @@ class StandbyDatabasePolicyTestCase(BaseDatabasePolicyTestCase):
         for store in ALL_STORES:
             self.assertProvides(
                 getUtility(IStoreSelector).get(store, DEFAULT_FLAVOR),
-                ISlaveStore)
+                IStandbyStore)
 
     def test_primary_allowed(self):
         for store in ALL_STORES:
@@ -157,7 +157,7 @@ class PrimaryDatabasePolicyTestCase(BaseDatabasePolicyTestCase):
         for store in ALL_STORES:
             self.assertProvides(
                 getUtility(IStoreSelector).get(store, STANDBY_FLAVOR),
-                ISlaveStore)
+                IStandbyStore)
 
 
 class LaunchpadDatabasePolicyTestCase(StandbyDatabasePolicyTestCase):
@@ -268,56 +268,56 @@ class PrimaryFallbackTestCase(TestCase):
         '''Confirm that this TestCase's test infrastructure works as needed.
         '''
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
+        standby_store = IStandbyStore(Person)
 
         # Both Stores work when pgbouncer is up.
         master_store.get(Person, 1)
-        slave_store.get(Person, 1)
+        standby_store.get(Person, 1)
 
-        # Slave Store breaks when pgbouncer is torn down. Master Store
+        # Standby Store breaks when pgbouncer is torn down. Master Store
         # is fine.
         self.pgbouncer_fixture.stop()
         master_store.get(Person, 2)
-        self.assertRaises(DisconnectionError, slave_store.get, Person, 2)
+        self.assertRaises(DisconnectionError, standby_store.get, Person, 2)
 
     def test_startup_with_no_standby(self):
         '''An attempt is made for the first time to connect to a standby.'''
         self.pgbouncer_fixture.stop()
 
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
+        standby_store = IStandbyStore(Person)
 
-        # The master and slave Stores are the same object.
-        self.assertIs(master_store, slave_store)
+        # The master and standby Stores are the same object.
+        self.assertIs(master_store, standby_store)
 
     def test_standby_shutdown_during_transaction(self):
         '''Standby is shutdown while running, but we can recover.'''
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
+        standby_store = IStandbyStore(Person)
 
-        self.assertIsNot(master_store, slave_store)
+        self.assertIsNot(master_store, standby_store)
 
         self.pgbouncer_fixture.stop()
 
-        # The transaction fails if the slave store is used. Robust
+        # The transaction fails if the standby store is used. Robust
         # processes will handle this and retry (even if just means exit
         # and wait for the next scheduled invocation).
-        self.assertRaises(DisconnectionError, slave_store.get, Person, 1)
+        self.assertRaises(DisconnectionError, standby_store.get, Person, 1)
 
         transaction.abort()
 
         # But in the next transaction, we get the master Store if we ask
-        # for the slave Store so we can continue.
+        # for the standby Store so we can continue.
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
+        standby_store = IStandbyStore(Person)
 
-        self.assertIs(master_store, slave_store)
+        self.assertIs(master_store, standby_store)
 
     def test_standby_shutdown_between_transactions(self):
         '''Standby is shutdown in between transactions.'''
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
-        self.assertIsNot(master_store, slave_store)
+        standby_store = IStandbyStore(Person)
+        self.assertIsNot(master_store, standby_store)
 
         transaction.abort()
         self.pgbouncer_fixture.stop()
@@ -325,31 +325,31 @@ class PrimaryFallbackTestCase(TestCase):
         # The process doesn't notice the standby going down, and things
         # will fail the next time the standby is used.
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
-        self.assertIsNot(master_store, slave_store)
-        self.assertRaises(DisconnectionError, slave_store.get, Person, 1)
+        standby_store = IStandbyStore(Person)
+        self.assertIsNot(master_store, standby_store)
+        self.assertRaises(DisconnectionError, standby_store.get, Person, 1)
 
         # But now it has been discovered the socket is no longer
         # connected to anything, next transaction we get a master
-        # Store when we ask for a slave.
+        # Store when we ask for a standby.
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
-        self.assertIs(master_store, slave_store)
+        standby_store = IStandbyStore(Person)
+        self.assertIs(master_store, standby_store)
 
     def test_standby_reconnect_after_outage(self):
         '''The standby is again used once it becomes available.'''
         self.pgbouncer_fixture.stop()
 
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
-        self.assertIs(master_store, slave_store)
+        standby_store = IStandbyStore(Person)
+        self.assertIs(master_store, standby_store)
 
         self.pgbouncer_fixture.start()
         transaction.abort()
 
         master_store = IMasterStore(Person)
-        slave_store = ISlaveStore(Person)
-        self.assertIsNot(master_store, slave_store)
+        standby_store = IStandbyStore(Person)
+        self.assertIsNot(master_store, standby_store)
 
 
 class TestFastDowntimeRollout(TestCase):
@@ -401,7 +401,7 @@ class TestFastDowntimeRollout(TestCase):
         '''You can always access a working standby store during fast downtime.
         '''
         # Everything is running happily.
-        store = ISlaveStore(Person)
+        store = IStandbyStore(Person)
         original_store = store
         self.assertTrue(self.store_is_working(store))
         self.assertTrue(self.store_is_standby(store))
@@ -437,9 +437,9 @@ class TestFastDowntimeRollout(TestCase):
         # But if we handle that and retry, we can continue.
         # Now the failed connection has been detected, the next Store
         # we are handed is a primary Store instead of a standby.
-        store = ISlaveStore(Person)
+        store = IStandbyStore(Person)
         self.assertTrue(self.store_is_primary(store))
-        self.assertIsNot(ISlaveStore(Person), original_store)
+        self.assertIsNot(IStandbyStore(Person), original_store)
 
         # But alas, it might not work the first transaction. If it has
         # been earlier, its connection was killed by pgbouncer earlier
@@ -449,7 +449,7 @@ class TestFastDowntimeRollout(TestCase):
 
         # Next retry attempt, everything is fine using the primary
         # connection, even though our code only asked for a standby.
-        store = ISlaveStore(Person)
+        store = IStandbyStore(Person)
         self.assertTrue(self.store_is_primary(store))
         self.assertTrue(self.store_is_working(store))
 
@@ -464,7 +464,7 @@ class TestFastDowntimeRollout(TestCase):
         self.pgbouncer_cur.execute('ENABLE %s' % self.standby_dbname)
 
         # And next transaction, we are back to normal.
-        store = ISlaveStore(Person)
+        store = IStandbyStore(Person)
         self.assertTrue(self.store_is_working(store))
         self.assertTrue(self.store_is_standby(store))
         self.assertIs(original_store, store)
@@ -477,9 +477,9 @@ class TestFastDowntimeRollout(TestCase):
         self.assertTrue(self.store_is_primary(master_store))
         self.assertTrue(self.store_is_working(master_store))
 
-        slave_store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_standby(slave_store))
-        self.assertTrue(self.store_is_working(slave_store))
+        standby_store = IStandbyStore(Person)
+        self.assertTrue(self.store_is_standby(standby_store))
+        self.assertTrue(self.store_is_working(standby_store))
 
         # But fast downtime is about to happen.
 
@@ -492,7 +492,7 @@ class TestFastDowntimeRollout(TestCase):
         self.pgbouncer_cur.execute('KILL %s' % self.primary_dbname)
 
         # Of course, standby connections are unaffected.
-        self.assertTrue(self.store_is_working(slave_store))
+        self.assertTrue(self.store_is_working(standby_store))
 
         # But attempts to use a primary store will fail.
         self.assertFalse(self.store_is_working(master_store))
@@ -515,18 +515,18 @@ class TestFastDowntimeRollout(TestCase):
 
         # The next attempt at accessing the standby store will fail
         # with a DisconnectionError.
-        slave_store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_standby(slave_store))
+        standby_store = IStandbyStore(Person)
+        self.assertTrue(self.store_is_standby(standby_store))
         self.assertRaises(
-            DisconnectionError, slave_store.execute, 'SELECT TRUE')
+            DisconnectionError, standby_store.execute, 'SELECT TRUE')
         transaction.abort()
 
         # But if we handle that and retry, we can continue.
         # Now the failed connection has been detected, the next Store
         # we are handed is a primary Store instead of a standby.
-        slave_store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_primary(slave_store))
-        self.assertTrue(self.store_is_working(slave_store))
+        standby_store = IStandbyStore(Person)
+        self.assertTrue(self.store_is_primary(standby_store))
+        self.assertTrue(self.store_is_working(standby_store))
 
         # Once replication has caught up, the standby is reenabled.
         self.pgbouncer_cur.execute('RESUME %s' % self.standby_dbname)
@@ -538,6 +538,6 @@ class TestFastDowntimeRollout(TestCase):
         self.assertTrue(self.store_is_primary(master_store))
         self.assertTrue(self.store_is_working(master_store))
 
-        slave_store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_standby(slave_store))
-        self.assertTrue(self.store_is_working(slave_store))
+        standby_store = IStandbyStore(Person)
+        self.assertTrue(self.store_is_standby(standby_store))
+        self.assertTrue(self.store_is_working(standby_store))
diff --git a/lib/lp/services/worlddata/model/language.py b/lib/lp/services/worlddata/model/language.py
index aaff902..d4af9d6 100644
--- a/lib/lp/services/worlddata/model/language.py
+++ b/lib/lp/services/worlddata/model/language.py
@@ -26,7 +26,7 @@ from lp.registry.model.karma import (
 from lp.services.database.decoratedresultset import DecoratedResultSet
 from lp.services.database.enumcol import DBEnum
 from lp.services.database.interfaces import (
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.database.sqlbase import SQLBase
@@ -292,7 +292,7 @@ class LanguageSet:
         """See `ILanguageSet`."""
         if text:
             text = six.ensure_text(text).lower()
-            results = ISlaveStore(Language).find(
+            results = IStandbyStore(Language).find(
                 Language, Or(
                     Language.code.lower().contains_string(text),
                     Language.englishname.lower().contains_string(
diff --git a/lib/lp/soyuz/model/archive.py b/lib/lp/soyuz/model/archive.py
index b3c7798..59312ea 100644
--- a/lib/lp/soyuz/model/archive.py
+++ b/lib/lp/soyuz/model/archive.py
@@ -105,7 +105,7 @@ from lp.services.database.datetimecol import UtcDateTimeCol
 from lp.services.database.decoratedresultset import DecoratedResultSet
 from lp.services.database.enumcol import DBEnum
 from lp.services.database.interfaces import (
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.database.sqlbase import (
@@ -1097,7 +1097,7 @@ class Archive(SQLBase):
             Component.name.is_in(components))
             for (archive, not_used, pocket, components) in deps])
 
-        return ISlaveStore(BinaryPackagePublishingHistory).find(
+        return IStandbyStore(BinaryPackagePublishingHistory).find(
             BinaryPackagePublishingHistory,
             BinaryPackageName.name == dep_name,
             BinaryPackagePublishingHistory.binarypackagename ==
diff --git a/lib/lp/soyuz/tests/test_archive.py b/lib/lp/soyuz/tests/test_archive.py
index 9e7be9a..cbc40ac 100644
--- a/lib/lp/soyuz/tests/test_archive.py
+++ b/lib/lp/soyuz/tests/test_archive.py
@@ -1901,7 +1901,7 @@ class TestFindDepCandidates(TestCaseWithFactory):
         archive, and compares it to the given expected value.
         The archive defaults to self.archive.
 
-        Also commits, since findDepCandidates uses the slave store.
+        Also commits, since findDepCandidates uses the standby store.
         """
         transaction.commit()
 
diff --git a/lib/lp/testing/factory.py b/lib/lp/testing/factory.py
index 71265f7..1588aad 100644
--- a/lib/lp/testing/factory.py
+++ b/lib/lp/testing/factory.py
@@ -397,7 +397,7 @@ def default_master_store(func):
 
     In some cases, such as in the middle of a page test story,
     we might be calling factory methods with the default Store set
-    to the slave which breaks stuff. For instance, if we set an account's
+    to the standby which breaks stuff. For instance, if we set an account's
     password that needs to happen on the master store and this is forced.
     However, if we then read it back the default Store has to be used.
     """
diff --git a/lib/lp/translations/doc/poexport-request.txt b/lib/lp/translations/doc/poexport-request.txt
index ba2b666..18d685d 100644
--- a/lib/lp/translations/doc/poexport-request.txt
+++ b/lib/lp/translations/doc/poexport-request.txt
@@ -33,7 +33,7 @@ Our user requests the Catalan and Czech translations of a template.
 
 Now we request that the queue be processed.
 
-(Commits are needed to make the test requests seep through to the slave
+(Commits are needed to make the test requests seep through to the standby
 database).
 
     >>> import transaction
diff --git a/lib/lp/translations/doc/poexportqueue-replication-lag.txt b/lib/lp/translations/doc/poexportqueue-replication-lag.txt
index 409fb71..e737cfc 100644
--- a/lib/lp/translations/doc/poexportqueue-replication-lag.txt
+++ b/lib/lp/translations/doc/poexportqueue-replication-lag.txt
@@ -2,7 +2,7 @@ Replication Lag and the Export Queue
 ====================================
 
 Due to replication lag it's possible for the export queue to see a
-request on the slave store that it actually just removed from the master
+request on the standby store that it actually just removed from the master
 store.
 
 We start our story with an empty export queue.
@@ -71,7 +71,7 @@ were to ask again.
     ja
 
 The first request is removed from the master store after processing, but
-not yet from the slave store.  (Since this test is all one session, we
+not yet from the standby store.  (Since this test is all one session, we
 can reproduce this by not committing the removal).  The second request
 is still technically on the queue, but no longer "live."
 
diff --git a/lib/lp/translations/doc/potmsgset.txt b/lib/lp/translations/doc/potmsgset.txt
index a7a8d79..da86a39 100644
--- a/lib/lp/translations/doc/potmsgset.txt
+++ b/lib/lp/translations/doc/potmsgset.txt
@@ -4,7 +4,7 @@ POTMsgSet tests
 POTMsgSet represents messages to translate that a POTemplate file has.
 
 In this test we'll be committing a lot to let changes replicate to the
-slave database.
+standby database.
 
     >>> import transaction
 
diff --git a/lib/lp/translations/interfaces/poexportrequest.py b/lib/lp/translations/interfaces/poexportrequest.py
index de4ad9b..2d0968b 100644
--- a/lib/lp/translations/interfaces/poexportrequest.py
+++ b/lib/lp/translations/interfaces/poexportrequest.py
@@ -47,7 +47,7 @@ class IPOExportRequestSet(Interface):
          * The requested `TranslationFileFormat`.
          * The list of request record ids making up this request.
 
-        The objects are all read-only objects from the slave store.  The
+        The objects are all read-only objects from the standby store.  The
         request ids list should be passed to `removeRequest` when
         processing of the request completes.
         """
diff --git a/lib/lp/translations/interfaces/potmsgset.py b/lib/lp/translations/interfaces/potmsgset.py
index 4af6b3d..3390f5f 100644
--- a/lib/lp/translations/interfaces/potmsgset.py
+++ b/lib/lp/translations/interfaces/potmsgset.py
@@ -172,7 +172,7 @@ class IPOTMsgSet(Interface):
         `POTMsgSet` that are actually used (i.e. current or imported) in
         other templates.
 
-        The suggestions are read-only; they come from the slave store.
+        The suggestions are read-only; they come from the standby store.
 
         :param language: language we want translations for.
         """
@@ -184,7 +184,7 @@ class IPOTMsgSet(Interface):
         `POTMsgSet` that were entered in another context, but for the
         same English text, and are not in actual use.
 
-        The suggestions are read-only; they come from the slave store.
+        The suggestions are read-only; they come from the standby store.
 
         :param language: language we want translations for.
         """
diff --git a/lib/lp/translations/model/poexportrequest.py b/lib/lp/translations/model/poexportrequest.py
index dc9b7d9..c532086 100644
--- a/lib/lp/translations/model/poexportrequest.py
+++ b/lib/lp/translations/model/poexportrequest.py
@@ -20,7 +20,7 @@ from lp.services.database.constants import DEFAULT
 from lp.services.database.enumcol import DBEnum
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.database.sqlbase import quote
@@ -121,7 +121,7 @@ class POExportRequestSet:
         """Return the oldest live request on the master store.
 
         Due to replication lag, the master store is always a little
-        ahead of the slave store that exports come from.
+        ahead of the standby store that exports come from.
         """
         master_store = IMasterStore(POExportRequest)
         sorted_by_id = master_store.find(POExportRequest).order_by(
@@ -130,7 +130,7 @@ class POExportRequestSet:
 
     def _getHeadRequest(self):
         """Return oldest request on the queue."""
-        # Due to replication lag, it's possible that the slave store
+        # Due to replication lag, it's possible that the standby store
         # still has copies of requests that have already been completed
         # and deleted from the master store.  So first get the oldest
         # request that is "live," i.e. still present on the master
@@ -139,21 +139,21 @@ class POExportRequestSet:
         if oldest_live is None:
             return None
         else:
-            return ISlaveStore(POExportRequest).find(
+            return IStandbyStore(POExportRequest).find(
                 POExportRequest,
                 POExportRequest.id == oldest_live.id).one()
 
     def getRequest(self):
         """See `IPOExportRequestSet`."""
-        # Exports happen off the slave store.  To ensure that export
+        # Exports happen off the standby store.  To ensure that export
         # does not happen until requests have been replicated to the
-        # slave, they are read primarily from the slave even though they
+        # standby, they are read primarily from the standby even though they
         # are deleted on the master afterwards.
         head = self._getHeadRequest()
         if head is None:
             return None, None, None, None
 
-        requests = ISlaveStore(POExportRequest).find(
+        requests = IStandbyStore(POExportRequest).find(
             POExportRequest,
             POExportRequest.person == head.person,
             POExportRequest.format == head.format,
diff --git a/lib/lp/translations/model/potmsgset.py b/lib/lp/translations/model/potmsgset.py
index 9435564..49c4051 100644
--- a/lib/lp/translations/model/potmsgset.py
+++ b/lib/lp/translations/model/potmsgset.py
@@ -374,7 +374,7 @@ class POTMsgSet(SQLBase):
         A message is used if it's either imported or current, and unused
         otherwise.
 
-        Suggestions are read-only, so these objects come from the slave
+        Suggestions are read-only, so these objects come from the standby
         store.
 
         :param suggested_languages: Languages that suggestions should be found
diff --git a/lib/lp/translations/model/translationgroup.py b/lib/lp/translations/model/translationgroup.py
index ab39ddf..c274697 100644
--- a/lib/lp/translations/model/translationgroup.py
+++ b/lib/lp/translations/model/translationgroup.py
@@ -25,7 +25,7 @@ from lp.services.database.constants import DEFAULT
 from lp.services.database.datetimecol import UtcDateTimeCol
 from lp.services.database.decoratedresultset import DecoratedResultSet
 from lp.services.database.interfaces import (
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.database.sqlbase import SQLBase
@@ -219,7 +219,7 @@ class TranslationGroup(SQLBase):
             LibraryFileAlias,
             LibraryFileContent,
             )
-        project_data = ISlaveStore(ProjectGroup).using(*using).find(
+        project_data = IStandbyStore(ProjectGroup).using(*using).find(
             tables,
             ProjectGroup.translationgroupID == self.id,
             ProjectGroup.active == True).order_by(ProjectGroup.display_name)
@@ -244,7 +244,7 @@ class TranslationGroup(SQLBase):
             LibraryFileAlias,
             LibraryFileContent,
             )
-        distro_data = ISlaveStore(Distribution).using(*using).find(
+        distro_data = IStandbyStore(Distribution).using(*using).find(
             tables, Distribution.translationgroupID == self.id).order_by(
             Distribution.display_name)
 
diff --git a/lib/lp/translations/model/translationimportqueue.py b/lib/lp/translations/model/translationimportqueue.py
index 39df58b..03f85e7 100644
--- a/lib/lp/translations/model/translationimportqueue.py
+++ b/lib/lp/translations/model/translationimportqueue.py
@@ -60,7 +60,7 @@ from lp.services.database.constants import (
 from lp.services.database.enumcol import DBEnum
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.database.sqlbase import quote
@@ -1388,13 +1388,13 @@ class TranslationImportQueue:
 
         return approved_entries
 
-    def _getSlaveStore(self):
-        """Return the slave store for the import queue.
+    def _getStandbyStore(self):
+        """Return the standby store for the import queue.
 
         Tests can override this to avoid unnecessary synchronization
         issues.
         """
-        return ISlaveStore(TranslationImportQueueEntry)
+        return IStandbyStore(TranslationImportQueueEntry)
 
     def _getBlockableDirectories(self):
         """Describe all directories where uploads are to be blocked.
@@ -1415,7 +1415,7 @@ class TranslationImportQueue:
         """
         importer = getUtility(ITranslationImporter)
 
-        store = self._getSlaveStore()
+        store = self._getStandbyStore()
         TIQE = TranslationImportQueueEntry
         result = store.find(
             (TIQE.distroseries_id, TIQE.sourcepackagename_id,
diff --git a/lib/lp/translations/model/vpoexport.py b/lib/lp/translations/model/vpoexport.py
index 4e1f730..11b7c1a 100644
--- a/lib/lp/translations/model/vpoexport.py
+++ b/lib/lp/translations/model/vpoexport.py
@@ -70,7 +70,7 @@ class VPOExportSet:
         if languagepack:
             conditions.append(POTemplate.languagepack == True)
 
-        # Use the slave store.  We may want to write to the distroseries
+        # Use the standby store.  We may want to write to the distroseries
         # to register a language pack, but not to the translation data
         # we retrieve for it.
         # XXX wgrant 2017-03-21: Moved to master to avoid termination
diff --git a/lib/lp/translations/scripts/tests/test_translations_to_branch.py b/lib/lp/translations/scripts/tests/test_translations_to_branch.py
index ae6c1c2..e99e417 100644
--- a/lib/lp/translations/scripts/tests/test_translations_to_branch.py
+++ b/lib/lp/translations/scripts/tests/test_translations_to_branch.py
@@ -22,7 +22,7 @@ from lp.registry.interfaces.teammembership import (
     )
 from lp.registry.model.productseries import ProductSeries
 from lp.services.config import config
-from lp.services.database.interfaces import ISlaveStore
+from lp.services.database.interfaces import IStandbyStore
 from lp.services.log.logger import BufferLogger
 from lp.services.scripts.tests import run_script
 from lp.testing import (
@@ -162,11 +162,11 @@ class TestExportTranslationsToBranch(TestCaseWithFactory):
         self.assertNotEqual(
             db_branch.last_mirrored_id,
             six.ensure_text(tree.branch.last_revision()))
-        # The export code works on a Branch from the slave store.  It
+        # The export code works on a Branch from the standby store.  It
         # shouldn't stop the scan request.
-        slave_series = ISlaveStore(productseries).get(
+        standby_series = IStandbyStore(productseries).get(
             ProductSeries, productseries.id)
-        exporter._exportToBranch(slave_series)
+        exporter._exportToBranch(standby_series)
         self.assertEqual(
             db_branch.last_mirrored_id,
             six.ensure_text(tree.branch.last_revision()))
diff --git a/lib/lp/translations/scripts/translations_to_branch.py b/lib/lp/translations/scripts/translations_to_branch.py
index 92f5ca4..c3ef20b 100644
--- a/lib/lp/translations/scripts/translations_to_branch.py
+++ b/lib/lp/translations/scripts/translations_to_branch.py
@@ -39,7 +39,7 @@ from lp.codehosting.vfs import get_rw_server
 from lp.services.config import config
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     )
 from lp.services.helpers import shortlist
 from lp.services.mail.helpers import (
@@ -312,7 +312,7 @@ class ExportTranslationsToBranch(LaunchpadCronScript):
 
         self.logger.info("Exporting to translations branches.")
 
-        self.store = ISlaveStore(Product)
+        self.store = IStandbyStore(Product)
 
         product_join = Join(
             ProductSeries, Product, ProductSeries.product == Product.id)
diff --git a/lib/lp/translations/tests/test_autoapproval.py b/lib/lp/translations/tests/test_autoapproval.py
index b778fac..b567c9d 100644
--- a/lib/lp/translations/tests/test_autoapproval.py
+++ b/lib/lp/translations/tests/test_autoapproval.py
@@ -1154,9 +1154,9 @@ class TestAutoBlocking(TestCaseWithFactory):
         super().setUp()
         self.queue = TranslationImportQueue()
         # Our test queue operates on the master store instead of the
-        # slave store so we don't have to synchronize stores.
+        # standby store so we don't have to synchronize stores.
         master_store = IMasterStore(TranslationImportQueueEntry)
-        self.queue._getSlaveStore = FakeMethod(result=master_store)
+        self.queue._getStandbyStore = FakeMethod(result=master_store)
 
     def _copyTargetFromEntry(self, entry):
         """Return a dict representing `entry`'s translation target.
diff --git a/lib/lp/translations/tests/test_translationimportqueue.py b/lib/lp/translations/tests/test_translationimportqueue.py
index 671195a..06abfdb 100644
--- a/lib/lp/translations/tests/test_translationimportqueue.py
+++ b/lib/lp/translations/tests/test_translationimportqueue.py
@@ -12,7 +12,7 @@ from zope.security.proxy import removeSecurityProxy
 from lp.app.enums import InformationType
 from lp.app.interfaces.launchpad import ILaunchpadCelebrities
 from lp.services.database.interfaces import (
-    ISlaveStore,
+    IStandbyStore,
     IStore,
     )
 from lp.services.librarianserver.testing.fake import FakeLibrarian
@@ -484,14 +484,14 @@ class TestTranslationImportQueue(TestCaseWithFactory):
         # reshuffled to see if reportApprovalConflict can be fooled into
         # thinking it's a different error.  Make as sure as we can that
         # entry.error_output is not modified.
-        slave_entry = ISlaveStore(entry).get(
+        standby_entry = IStandbyStore(entry).get(
             TranslationImportQueueEntry, entry.id)
-        slave_entry.setErrorOutput = FakeMethod()
-        slave_entry.reportApprovalConflict(
+        standby_entry.setErrorOutput = FakeMethod()
+        standby_entry.reportApprovalConflict(
             domain, len(templates), reversed(templates))
-        self.assertEqual(original_error, slave_entry.error_output)
+        self.assertEqual(original_error, standby_entry.error_output)
         self.assertIn(domain, original_error)
-        self.assertEqual(0, slave_entry.setErrorOutput.call_count)
+        self.assertEqual(0, standby_entry.setErrorOutput.call_count)
 
 
 class TestHelpers(TestCaseWithFactory):
diff --git a/scripts/get-stacked-on-branches.py b/scripts/get-stacked-on-branches.py
index 9a60b7f..6c15bc0 100755
--- a/scripts/get-stacked-on-branches.py
+++ b/scripts/get-stacked-on-branches.py
@@ -27,7 +27,7 @@ from optparse import OptionParser
 
 from storm.locals import Not
 
-from lp.services.database.interfaces import ISlaveStore
+from lp.services.database.interfaces import IStandbyStore
 from lp.services.scripts import execute_zcml_for_scripts
 
 
@@ -35,7 +35,7 @@ def get_stacked_branches():
     """Iterate over all branches that, according to the db, are stacked."""
     # Avoiding circular import.
     from lp.code.model.branch import Branch
-    return ISlaveStore(Branch).find(Branch, Not(Branch.stacked_on == None))
+    return IStandbyStore(Branch).find(Branch, Not(Branch.stacked_on == None))
 
 
 def main():
diff --git a/utilities/soyuz-sampledata-setup.py b/utilities/soyuz-sampledata-setup.py
index b8cb28e..cde53e9 100755
--- a/utilities/soyuz-sampledata-setup.py
+++ b/utilities/soyuz-sampledata-setup.py
@@ -40,7 +40,7 @@ from lp.registry.interfaces.series import SeriesStatus
 from lp.registry.model.codeofconduct import SignedCodeOfConduct
 from lp.services.database.interfaces import (
     IMasterStore,
-    ISlaveStore,
+    IStandbyStore,
     )
 from lp.services.scripts.base import LaunchpadScript
 from lp.soyuz.enums import SourcePackageFormat
@@ -78,7 +78,7 @@ def check_preconditions(options):
     This script must not run on a production server, or anything
     remotely like it.
     """
-    store = ISlaveStore(ComponentSelection)
+    store = IStandbyStore(ComponentSelection)
 
     # Just a guess, but dev systems aren't likely to have ids this high
     # in this table.  Production data does.