launchpad-reviewers team mailing list archive

[Merge] ~cjwatson/launchpad:rename-master-slave-flavors into launchpad:master


Colin Watson has proposed merging ~cjwatson/launchpad:rename-master-slave-flavors into launchpad:master.

Commit message:
Rename master/slave DB flavors to primary/standby

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~cjwatson/launchpad/+git/launchpad/+merge/411554

The corresponding `I*Store` interfaces are still called master/slave, so there's still some mixed terminology, but we're getting closer.
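
To illustrate the mixed naming this leaves behind, here is a minimal doctest-style sketch (not part of the patch, and assuming `IMasterStore` is importable from `lp.services.database.interfaces` alongside the flavor constants, as the import blocks elsewhere in this diff suggest): the flavor constants now use the new terms, while the marker interface provided by a primary-flavor store still carries the old name. Under a policy that allows primary access:

    >>> from zope.component import getUtility
    >>> from lp.services.database.interfaces import (
    ...     IMasterStore, IStoreSelector, MAIN_STORE, PRIMARY_FLAVOR)

    >>> # Request a store using the renamed flavor constant...
    >>> store = getUtility(IStoreSelector).get(MAIN_STORE, PRIMARY_FLAVOR)

    >>> # ...but the marker interface it provides still uses the old name.
    >>> IMasterStore.providedBy(store)
    True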
-- 
Your team Launchpad code reviewers is requested to review the proposed merge of ~cjwatson/launchpad:rename-master-slave-flavors into launchpad:master.
diff --git a/lib/lp/services/database/doc/db-policy.txt b/lib/lp/services/database/doc/db-policy.txt
index 17dc147..8a1cce2 100644
--- a/lib/lp/services/database/doc/db-policy.txt
+++ b/lib/lp/services/database/doc/db-policy.txt
@@ -83,7 +83,7 @@ resources.
     ...         Person, Person.name == 'janitor').one()
     Traceback (most recent call last):
     ...
-    lp.services.database.interfaces.DisallowedStore: master
+    lp.services.database.interfaces.DisallowedStore: primary
 
 We can even ensure no database activity occurs at all, for instance
 if we need to guarantee a potentially long running call doesn't access
diff --git a/lib/lp/services/database/interfaces.py b/lib/lp/services/database/interfaces.py
index c9709ef..14407be 100644
--- a/lib/lp/services/database/interfaces.py
+++ b/lib/lp/services/database/interfaces.py
@@ -14,8 +14,8 @@ __all__ = [
     'IStore',
     'IStoreSelector',
     'MAIN_STORE',
-    'MASTER_FLAVOR',
-    'SLAVE_FLAVOR',
+    'PRIMARY_FLAVOR',
+    'STANDBY_FLAVOR',
     ]
 
 
@@ -46,8 +46,8 @@ MAIN_STORE = 'main'  # The main database.
 ALL_STORES = frozenset([MAIN_STORE])
 
 DEFAULT_FLAVOR = 'default'  # Default flavor for current state.
-MASTER_FLAVOR = 'master'  # The master database.
-SLAVE_FLAVOR = 'slave'  # A slave database.
+PRIMARY_FLAVOR = 'primary'  # The primary database.
+STANDBY_FLAVOR = 'standby'  # A standby database.
 
 
 class IDatabasePolicy(Interface):
@@ -75,7 +75,7 @@ class IDatabasePolicy(Interface):
 
         :param name: one of ALL_STORES.
 
-        :param flavor: MASTER_FLAVOR, SLAVE_FLAVOR, or DEFAULT_FLAVOR.
+        :param flavor: PRIMARY_FLAVOR, STANDBY_FLAVOR, or DEFAULT_FLAVOR.
         """
 
     def install():
@@ -94,15 +94,15 @@ class DisallowedStore(Exception):
 class IStoreSelector(Interface):
     """Get a Storm store with a desired flavor.
 
-    Stores come in two flavors - MASTER_FLAVOR and SLAVE_FLAVOR.
+    Stores come in two flavors - PRIMARY_FLAVOR and STANDBY_FLAVOR.
 
-    The master is writable and up to date, but we should not use it
-    whenever possible because there is only one master and we don't want
+    The primary is writable and up to date, but we should not use it
+    whenever possible because there is only one primary and we don't want
     it to be overloaded.
 
-    The slave is read only replica of the master and may lag behind the
-    master. For many purposes such as serving unauthenticated web requests
-    and generating reports this is fine. We can also have as many slave
+    The standby is a read-only replica of the primary and may lag behind the
+    primary. For many purposes such as serving unauthenticated web requests
+    and generating reports this is fine. We can also have as many standby
     databases as we are prepared to pay for, so they will perform better
     because they are less loaded.
     """
@@ -126,16 +126,16 @@ class IStoreSelector(Interface):
         returned for a given name or flavor can depend on thread state
         (eg. the HTTP request currently being handled).
 
-        If a SLAVE_FLAVOR is requested, the MASTER_FLAVOR may be returned
+        If a STANDBY_FLAVOR is requested, the PRIMARY_FLAVOR may be returned
         anyway.
 
-        The DEFAULT_FLAVOR flavor may return either a master or slave
+        The DEFAULT_FLAVOR flavor may return either a primary or standby
         depending on process state. Application code using the
-        DEFAULT_FLAVOR flavor should assume they have a MASTER and that
+        DEFAULT_FLAVOR flavor should assume they have a PRIMARY and that
         a higher level will catch the exception raised if an attempt is
-        made to write changes to a read only store. DEFAULT_FLAVOR exists
+        made to write changes to a read-only store. DEFAULT_FLAVOR exists
         for backwards compatibility, and new code should explicitly state
-        if they want a master or a slave.
+        if they want a primary or a standby.
 
         :raises DisconnectionError:
 
diff --git a/lib/lp/services/database/policy.py b/lib/lp/services/database/policy.py
index a347060..838df09 100644
--- a/lib/lp/services/database/policy.py
+++ b/lib/lp/services/database/policy.py
@@ -47,8 +47,8 @@ from lp.services.database.interfaces import (
     ISlaveStore,
     IStoreSelector,
     MAIN_STORE,
-    MASTER_FLAVOR,
-    SLAVE_FLAVOR,
+    PRIMARY_FLAVOR,
+    STANDBY_FLAVOR,
     )
 from lp.services.database.sqlbase import StupidCache
 
@@ -107,7 +107,7 @@ class BaseDatabasePolicy:
     """Base class for database policies."""
 
     # The default flavor to use.
-    default_flavor = MASTER_FLAVOR
+    default_flavor = PRIMARY_FLAVOR
 
     def __init__(self, request=None):
         pass
@@ -121,21 +121,21 @@ class BaseDatabasePolicy:
             store = get_connected_store(name, flavor)
         except DisconnectionError:
 
-            # A request for a master database connection was made
+            # A request for a primary database connection was made
             # and failed. Nothing we can do so reraise the exception.
-            if flavor != SLAVE_FLAVOR:
+            if flavor != STANDBY_FLAVOR:
                 raise
 
-            # A request for a slave database connection was made
-            # and failed. Try to return a master connection, this
+            # A request for a standby database connection was made
+            # and failed. Try to return a primary connection, this
             # will be good enough. Note we don't call self.getStore()
             # recursively because we want to make this attempt even if
-            # the DatabasePolicy normally disallows master database
+            # the DatabasePolicy normally disallows primary database
             # connections. All this behaviour allows read-only requests
-            # to keep working when slave databases are being rebuilt or
+            # to keep working when standby databases are being rebuilt or
             # updated.
             try:
-                flavor = MASTER_FLAVOR
+                flavor = PRIMARY_FLAVOR
                 store = get_connected_store(name, flavor)
             except DisconnectionError:
                 store = None
@@ -155,7 +155,7 @@ class BaseDatabasePolicy:
             store._cache = storm_cache_factory()
 
             # Attach our marker interfaces so our adapters don't lie.
-            if flavor == MASTER_FLAVOR:
+            if flavor == PRIMARY_FLAVOR:
                 alsoProvides(store, IMasterStore)
             else:
                 alsoProvides(store, ISlaveStore)
@@ -193,7 +193,7 @@ class DatabaseBlockedPolicy(BaseDatabasePolicy):
 
 
 class PrimaryDatabasePolicy(BaseDatabasePolicy):
-    """`IDatabasePolicy` that selects the MASTER_FLAVOR by default.
+    """`IDatabasePolicy` that selects the PRIMARY_FLAVOR by default.
 
     Standby databases can still be accessed if requested explicitly.
 
@@ -201,29 +201,29 @@ class PrimaryDatabasePolicy(BaseDatabasePolicy):
     support session cookies. It is also used when no policy has been
     installed.
     """
-    default_flavor = MASTER_FLAVOR
+    default_flavor = PRIMARY_FLAVOR
 
 
 class StandbyDatabasePolicy(BaseDatabasePolicy):
-    """`IDatabasePolicy` that selects the SLAVE_FLAVOR by default.
+    """`IDatabasePolicy` that selects the STANDBY_FLAVOR by default.
 
     Access to the primary can still be made if requested explicitly.
     """
-    default_flavor = SLAVE_FLAVOR
+    default_flavor = STANDBY_FLAVOR
 
 
 class StandbyOnlyDatabasePolicy(BaseDatabasePolicy):
-    """`IDatabasePolicy` that only allows access to SLAVE_FLAVOR stores.
+    """`IDatabasePolicy` that only allows access to STANDBY_FLAVOR stores.
 
     This policy is used for Feeds requests and other always-read only request.
     """
-    default_flavor = SLAVE_FLAVOR
+    default_flavor = STANDBY_FLAVOR
 
     def getStore(self, name, flavor):
         """See `IDatabasePolicy`."""
-        if flavor == MASTER_FLAVOR:
+        if flavor == PRIMARY_FLAVOR:
             raise DisallowedStore(flavor)
-        return super().getStore(name, SLAVE_FLAVOR)
+        return super().getStore(name, STANDBY_FLAVOR)
 
 
 def LaunchpadDatabasePolicyFactory(request):
@@ -263,25 +263,25 @@ class LaunchpadDatabasePolicy(BaseDatabasePolicy):
         """See `IDatabasePolicy`."""
         default_flavor = None
 
-        # If this is a Retry attempt, force use of the master database.
+        # If this is a Retry attempt, force use of the primary database.
         if getattr(self.request, '_retry_count', 0) > 0:
-            default_flavor = MASTER_FLAVOR
+            default_flavor = PRIMARY_FLAVOR
 
-        # Select if the DEFAULT_FLAVOR Store will be the master or a
-        # slave. We select slave if this is a readonly request, and
+        # Select if the DEFAULT_FLAVOR Store will be the primary or a
+        # standby. We select standby if this is a readonly request, and
         # only readonly requests have been made by this user recently.
         # This ensures that a user will see any changes they just made
-        # on the master, despite the fact it might take a while for
-        # those changes to propagate to the slave databases.
+        # on the primary, despite the fact it might take a while for
+        # those changes to propagate to the standby databases.
         elif self.read_only:
             lag = self.getReplicationLag()
             if (lag is not None
                 and lag > timedelta(seconds=config.database.max_usable_lag)):
-                # Don't use the slave at all if lag is greater than the
+                # Don't use the standby at all if lag is greater than the
                 # configured threshold. This reduces replication oddities
                 # noticed by users, as well as reducing load on the
-                # slave allowing it to catch up quicker.
-                default_flavor = MASTER_FLAVOR
+                # standby allowing it to catch up quicker.
+                default_flavor = PRIMARY_FLAVOR
             else:
                 # We don't want to even make a DB query to read the session
                 # if we can tell that it is not around.  This can be
@@ -300,11 +300,11 @@ class LaunchpadDatabasePolicy(BaseDatabasePolicy):
                 else:
                     recently = timedelta(minutes=2) + lag
                 if last_write is None or last_write < now - recently:
-                    default_flavor = SLAVE_FLAVOR
+                    default_flavor = STANDBY_FLAVOR
                 else:
-                    default_flavor = MASTER_FLAVOR
+                    default_flavor = PRIMARY_FLAVOR
         else:
-            default_flavor = MASTER_FLAVOR
+            default_flavor = PRIMARY_FLAVOR
 
         assert default_flavor is not None, 'default_flavor not set!'
 
@@ -315,7 +315,7 @@ class LaunchpadDatabasePolicy(BaseDatabasePolicy):
 
         If the request just handled was not read_only, we need to store
         this fact and the timestamp in the session. Subsequent requests
-        can then keep using the master until they are sure any changes
+        can then keep using the primary until they are sure any changes
         made have been propagated.
         """
         if not self.read_only:
@@ -352,15 +352,15 @@ class LaunchpadDatabasePolicy(BaseDatabasePolicy):
             return _test_lag
 
         # Attempt to retrieve PostgreSQL streaming replication lag
-        # from the slave.
-        slave_store = self.getStore(MAIN_STORE, SLAVE_FLAVOR)
-        hot_standby, streaming_lag = slave_store.execute("""
+        # from the standby.
+        standby_store = self.getStore(MAIN_STORE, STANDBY_FLAVOR)
+        hot_standby, streaming_lag = standby_store.execute("""
             SELECT
                 pg_is_in_recovery(),
                 now() - pg_last_xact_replay_timestamp()
             """).get_one()
         if hot_standby and streaming_lag is not None:
-            # Slave is a PG 9.1 streaming replication hot standby.
+            # standby is a PG 9.1 streaming replication hot standby.
             # Return the lag.
             return streaming_lag
 
diff --git a/lib/lp/services/oauth/tests/test_oauth.py b/lib/lp/services/oauth/tests/test_oauth.py
index 1d95ccc..20c192c 100644
--- a/lib/lp/services/oauth/tests/test_oauth.py
+++ b/lib/lp/services/oauth/tests/test_oauth.py
@@ -12,7 +12,7 @@ from zope.component import getUtility
 
 from lp.services.database.interfaces import (
     MAIN_STORE,
-    MASTER_FLAVOR,
+    PRIMARY_FLAVOR,
     )
 from lp.services.oauth.model import (
     OAuthAccessToken,
@@ -26,14 +26,15 @@ class BaseOAuthTestCase(unittest.TestCase):
     """Base tests for the OAuth database classes."""
     layer = DatabaseFunctionalLayer
 
-    def test__getStore_should_return_the_main_master_store(self):
-        """We want all OAuth classes to use the master store.
+    def test__getStore_should_return_the_main_primary_store(self):
+        """We want all OAuth classes to use the primary store.
         Otherwise, the OAuth exchanges will fail because the authorize
-        screen won't probably find the new request token on the slave store.
+        screen won't probably find the new request token on the standby
+        store.
         """
         zstorm = getUtility(IZStorm)
         self.assertEqual(
-            '%s-%s' % (MAIN_STORE, MASTER_FLAVOR),
+            '%s-%s' % (MAIN_STORE, PRIMARY_FLAVOR),
             zstorm.get_name(self.class_._getStore()))
 
 
diff --git a/lib/lp/services/verification/model/logintoken.py b/lib/lp/services/verification/model/logintoken.py
index ca24bf8..9243471 100644
--- a/lib/lp/services/verification/model/logintoken.py
+++ b/lib/lp/services/verification/model/logintoken.py
@@ -279,7 +279,7 @@ class LoginTokenSet:
                 "consumed should be one of {True, False, None}. Got '%s'."
                 % consumed)
 
-        # It's important to always use the MASTER_FLAVOR store here
+        # It's important to always use the PRIMARY_FLAVOR store here
         # because we don't want replication lag to cause a 404 error.
         return IMasterStore(LoginToken).find(LoginToken, conditions)
 
@@ -306,7 +306,7 @@ class LoginTokenSet:
                 "consumed should be one of {True, False, None}. Got '%s'."
                 % consumed)
 
-        # It's important to always use the MASTER_FLAVOR store here
+        # It's important to always use the PRIMARY_FLAVOR store here
         # because we don't want replication lag to cause a 404 error.
         return IMasterStore(LoginToken).find(LoginToken, conditions)
 
diff --git a/lib/lp/services/webapp/adapter.py b/lib/lp/services/webapp/adapter.py
index 816f206..00e0f26 100644
--- a/lib/lp/services/webapp/adapter.py
+++ b/lib/lp/services/webapp/adapter.py
@@ -57,8 +57,8 @@ from lp.services.database.interfaces import (
     IRequestExpired,
     IStoreSelector,
     MAIN_STORE,
-    MASTER_FLAVOR,
-    SLAVE_FLAVOR,
+    PRIMARY_FLAVOR,
+    STANDBY_FLAVOR,
     )
 from lp.services.database.policy import PrimaryDatabasePolicy
 from lp.services.database.postgresql import ConnectionString
@@ -467,7 +467,7 @@ class LaunchpadDatabase(Postgres):
                 % repr(self._uri.database))
 
         assert realm == 'main', 'Unknown realm %s' % realm
-        assert flavor in ('master', 'slave'), 'Unknown flavor %s' % flavor
+        assert flavor in ('primary', 'standby'), 'Unknown flavor %s' % flavor
 
         # We set self._dsn here rather than in __init__ so when the Store
         # is reconnected it pays attention to any config changes.
@@ -496,13 +496,14 @@ class LaunchpadDatabase(Postgres):
         # An alternative would be to use the _ro users generated by
         # security.py, but this would needlessly double the number
         # of database users we need to maintain ACLs for on production.
-        if flavor == SLAVE_FLAVOR:
+        if flavor == STANDBY_FLAVOR:
             raw_connection.cursor().execute(
                 'SET DEFAULT_TRANSACTION_READ_ONLY TO TRUE')
             # Make the altered session setting stick.
             raw_connection.commit()
-        else:
-            assert config_entry.endswith('_master'), (
+        elif (not config_entry.endswith('_master') and
+                not config_entry.endswith('_primary')):
+            raise AssertionError(
                 'DB connection URL %s does not meet naming convention.')
 
         _reset_dirty_commit_flags(*flags)
@@ -769,12 +770,12 @@ def get_store(storm_class, flavor=DEFAULT_FLAVOR):
 
 def get_master_store(storm_class):
     """Return the master Store for the given database class."""
-    return get_store(storm_class, MASTER_FLAVOR)
+    return get_store(storm_class, PRIMARY_FLAVOR)
 
 
 def get_slave_store(storm_class):
     """Return the master Store for the given database class."""
-    return get_store(storm_class, SLAVE_FLAVOR)
+    return get_store(storm_class, STANDBY_FLAVOR)
 
 
 def get_object_from_master_store(obj):
diff --git a/lib/lp/services/webapp/doc/test_adapter.txt b/lib/lp/services/webapp/doc/test_adapter.txt
index 0b6712c..abe5036 100644
--- a/lib/lp/services/webapp/doc/test_adapter.txt
+++ b/lib/lp/services/webapp/doc/test_adapter.txt
@@ -12,7 +12,7 @@ Imports and test setup:
     >>> from lazr.restful.utils import get_current_browser_request
     >>> from storm.zope.interfaces import IZStorm
     >>> from lp.services.database.interfaces import (
-    ...     IStoreSelector, MAIN_STORE, MASTER_FLAVOR)
+    ...     IStoreSelector, MAIN_STORE, PRIMARY_FLAVOR)
     >>> from lp.services.config import config
     >>> import lp.services.webapp.adapter
     >>> from lp.services.webapp.adapter import (
@@ -24,7 +24,7 @@ Imports and test setup:
 There are several possible database connections available via the
 IStoreSelector utility.
 
-    >>> store = getUtility(IStoreSelector).get(MAIN_STORE, MASTER_FLAVOR)
+    >>> store = getUtility(IStoreSelector).get(MAIN_STORE, PRIMARY_FLAVOR)
     >>> dbname = DatabaseLayer._db_fixture.dbname
     >>> active_name = store.execute("SELECT current_database()").get_one()[0]
     >>> if active_name != dbname: print('%s != %s' % (active_name, dbname))
@@ -190,7 +190,7 @@ the Postgres statement timeout (a value of zero means no timeout):
     ...     zstorm.remove(store)
     ...     transaction.abort()
     ...     store.close()
-    ...     store = getUtility(IStoreSelector).get(MAIN_STORE, MASTER_FLAVOR)
+    ...     store = getUtility(IStoreSelector).get(MAIN_STORE, PRIMARY_FLAVOR)
 
     >>> set_request_started()
     >>> print(current_statement_timeout(store))
@@ -357,7 +357,7 @@ The request time limit was exceeded before the statement was issued to
 the database.
 
     >>> print(pretty(get_request_statements()))
-    [(0, ..., 'SQL-main-master', 'SELECT 2', ...)]
+    [(0, ..., 'SQL-main-primary', 'SELECT 2', ...)]
 
 
 When a RequestExpired exception is raised, the current
diff --git a/lib/lp/services/webapp/doc/test_adapter_permissions.txt b/lib/lp/services/webapp/doc/test_adapter_permissions.txt
index 8cf15f6..2ae057f 100644
--- a/lib/lp/services/webapp/doc/test_adapter_permissions.txt
+++ b/lib/lp/services/webapp/doc/test_adapter_permissions.txt
@@ -1,24 +1,25 @@
-Our database adapters need to trap writes to tables in slave replication
-sets. These tables may be reached directly using a SLAVE_FLAVOR store, or
-traversed to from a MASTER_FLAVOR store.
+Our database adapters need to trap writes to tables in standby replication
+sets. These tables may be reached directly using a STANDBY_FLAVOR store, or
+traversed to from a PRIMARY_FLAVOR store.
 
 Because our development environment is not replicated, we use database
-permissions to ensure that tables we should not be writing too cannot
+permissions to ensure that tables we should not be writing to cannot
 be written to. The same permissions structure is also used on production,
-so the Slony-I triggers blocking writes to slaved tables will never
+so the Slony-I triggers blocking writes to some tables will never
 actually be invoked.
 
     >>> from lp.registry.model.person import Person
     >>> from lp.services.database.interfaces import (
-    ...     IStoreSelector, MAIN_STORE, MASTER_FLAVOR, SLAVE_FLAVOR)
+    ...     IStoreSelector, MAIN_STORE, PRIMARY_FLAVOR, STANDBY_FLAVOR)
     >>> import transaction
     >>> from zope.component import getUtility
 
-If a SLAVE_FLAVOR store is requested, it should trap all writes.
+If a STANDBY_FLAVOR store is requested, it should trap all writes.
 
     >>> t = transaction.begin()
-    >>> main_slave = getUtility(IStoreSelector).get(MAIN_STORE, SLAVE_FLAVOR)
-    >>> janitor = main_slave.find(Person, name='janitor').one()
+    >>> main_standby = getUtility(IStoreSelector).get(
+    ...     MAIN_STORE, STANDBY_FLAVOR)
+    >>> janitor = main_standby.find(Person, name='janitor').one()
     >>> janitor.display_name = 'Ben Dover'
     >>> transaction.commit()
     Traceback (most recent call last):
@@ -29,21 +30,21 @@ Test this once more to ensure the settings stick across transactions.
 
     >>> transaction.abort()
     >>> t = transaction.begin()
-    >>> main_slave.find(Person, name='janitor').one().display_name = 'BenD'
+    >>> main_standby.find(Person, name='janitor').one().display_name = 'BenD'
     >>> transaction.commit()
     Traceback (most recent call last):
     ...
     storm.database.InternalError: ...
 
-If a MASTER_FLAVOR is requested, it should allow writes to table in that
+If a PRIMARY_FLAVOR is requested, it should allow writes to table in that
 Store's replication set.
 
     >>> t = transaction.begin()
-    >>> main_master = getUtility(IStoreSelector).get(
-    ...     MAIN_STORE, MASTER_FLAVOR)
-    >>> main_master.find(Person, name='janitor').one().display_name = 'BenD'
+    >>> main_primary = getUtility(IStoreSelector).get(
+    ...     MAIN_STORE, PRIMARY_FLAVOR)
+    >>> main_primary.find(Person, name='janitor').one().display_name = 'BenD'
     >>> transaction.commit()
     >>> t = transaction.begin()
-    >>> print(main_master.find(Person, name='janitor').one().display_name)
+    >>> print(main_primary.find(Person, name='janitor').one().display_name)
     BenD
     >>> transaction.abort()
diff --git a/lib/lp/services/webapp/doc/test_adapter_timeout.txt.disabled b/lib/lp/services/webapp/doc/test_adapter_timeout.txt.disabled
index dbf7a36..9585e0a 100644
--- a/lib/lp/services/webapp/doc/test_adapter_timeout.txt.disabled
+++ b/lib/lp/services/webapp/doc/test_adapter_timeout.txt.disabled
@@ -29,7 +29,7 @@ exception, and a time machine.
     >>> from lp.services.config import config
     >>> import lp.services.webapp.adapter
     >>> from lp.services.webapp.interfaces import (
-    ...     IStoreSelector, MAIN_STORE, MASTER_FLAVOR)
+    ...     IStoreSelector, MAIN_STORE, PRIMARY_FLAVOR)
     >>> from lp.testing.pages import setupBrowser
 
     >>> config.push('set_timeout', dedent('''
@@ -53,7 +53,7 @@ exception, and a time machine.
     ...         return self, None
     ...     def __call__(self):
     ...         store = zope.component.getUtility(IStoreSelector).get(
-    ...             MAIN_STORE, MASTER_FLAVOR)
+    ...             MAIN_STORE, PRIMARY_FLAVOR)
     ...         time_travel(config.database.db_statement_timeout +
     ...                     config.database.db_statement_timeout_precision)
     ...         store.execute('SELECT TRUE', noresult=True)
@@ -63,7 +63,7 @@ exception, and a time machine.
     ...     def __call__(self):
     ...         global timeout_in_exception_view
     ...         store = zope.component.getUtility(IStoreSelector).get(
-    ...             MAIN_STORE, MASTER_FLAVOR)
+    ...             MAIN_STORE, PRIMARY_FLAVOR)
     ...         try:
     ...             store.execute('SELECT TRUE', noresult=True)
     ...         except TimeoutError:
diff --git a/lib/lp/services/webapp/publication.py b/lib/lp/services/webapp/publication.py
index 717cc65..27c29d4 100644
--- a/lib/lp/services/webapp/publication.py
+++ b/lib/lp/services/webapp/publication.py
@@ -75,7 +75,7 @@ from lp.services.config import config
 from lp.services.database.interfaces import (
     IDatabasePolicy,
     IStoreSelector,
-    MASTER_FLAVOR,
+    PRIMARY_FLAVOR,
     )
 from lp.services.database.policy import LaunchpadDatabasePolicy
 from lp.services.features.flags import NullFeatureController
@@ -707,10 +707,10 @@ class LaunchpadBrowserPublication(
             # used is a replica, raise a Retry exception instead of
             # returning the 404 error page. We do this in case the
             # LookupError is caused by replication lag. Our database
-            # policy forces the use of the master database for retries.
+            # policy forces the use of the primary database for retries.
             if (isinstance(exc_info[1], LookupError)
                 and isinstance(db_policy, LaunchpadDatabasePolicy)):
-                if db_policy.default_flavor == MASTER_FLAVOR:
+                if db_policy.default_flavor == PRIMARY_FLAVOR:
                     return False
                 else:
                     return True
@@ -861,9 +861,9 @@ class LaunchpadBrowserPublication(
             # Reset all Storm stores when not running the test suite.
             # We could reset them when running the test suite but
             # that'd make writing tests a much more painful task. We
-            # still reset the slave stores though to minimize stale
+            # still reset the standby stores though to minimize stale
             # cache issues.
-            if thread_name != 'MainThread' or name.endswith('-slave'):
+            if thread_name != 'MainThread' or name.endswith('-standby'):
                 store.reset()
 
 
diff --git a/lib/lp/services/webapp/tests/test_dbpolicy.py b/lib/lp/services/webapp/tests/test_dbpolicy.py
index a8eac12..b47c3ba 100644
--- a/lib/lp/services/webapp/tests/test_dbpolicy.py
+++ b/lib/lp/services/webapp/tests/test_dbpolicy.py
@@ -39,8 +39,8 @@ from lp.services.database.interfaces import (
     ISlaveStore,
     IStoreSelector,
     MAIN_STORE,
-    MASTER_FLAVOR,
-    SLAVE_FLAVOR,
+    PRIMARY_FLAVOR,
+    STANDBY_FLAVOR,
     )
 from lp.services.database.policy import (
     BaseDatabasePolicy,
@@ -112,10 +112,10 @@ class StandbyDatabasePolicyTestCase(BaseDatabasePolicyTestCase):
                 getUtility(IStoreSelector).get(store, DEFAULT_FLAVOR),
                 ISlaveStore)
 
-    def test_master_allowed(self):
+    def test_primary_allowed(self):
         for store in ALL_STORES:
             self.assertProvides(
-                getUtility(IStoreSelector).get(store, MASTER_FLAVOR),
+                getUtility(IStoreSelector).get(store, PRIMARY_FLAVOR),
                 IMasterStore)
 
 
@@ -126,11 +126,11 @@ class StandbyOnlyDatabasePolicyTestCase(StandbyDatabasePolicyTestCase):
         self.policy = StandbyOnlyDatabasePolicy()
         super().setUp()
 
-    def test_master_allowed(self):
+    def test_primary_allowed(self):
         for store in ALL_STORES:
             self.assertRaises(
                 DisallowedStore,
-                getUtility(IStoreSelector).get, store, MASTER_FLAVOR)
+                getUtility(IStoreSelector).get, store, PRIMARY_FLAVOR)
 
 
 class PrimaryDatabasePolicyTestCase(BaseDatabasePolicyTestCase):
@@ -156,7 +156,7 @@ class PrimaryDatabasePolicyTestCase(BaseDatabasePolicyTestCase):
         # We get the primary store even if the standby was requested.
         for store in ALL_STORES:
             self.assertProvides(
-                getUtility(IStoreSelector).get(store, SLAVE_FLAVOR),
+                getUtility(IStoreSelector).get(store, STANDBY_FLAVOR),
                 ISlaveStore)
 
 
@@ -235,11 +235,11 @@ class LayerDatabasePolicyTestCase(TestCase):
         self.assertIsInstance(policy, LaunchpadDatabasePolicy)
 
 
-class MasterFallbackTestCase(TestCase):
+class PrimaryFallbackTestCase(TestCase):
     layer = DatabaseFunctionalLayer
 
     def setUp(self):
-        super(MasterFallbackTestCase, self).setUp()
+        super().setUp()
 
         self.pgbouncer_fixture = PGBouncerFixture()
 
@@ -264,7 +264,7 @@ class MasterFallbackTestCase(TestCase):
 
         self.useFixture(self.pgbouncer_fixture)
 
-    def test_can_shutdown_slave_only(self):
+    def test_can_shutdown_standby_only(self):
         '''Confirm that this TestCase's test infrastructure works as needed.
         '''
         master_store = IMasterStore(Person)
@@ -280,8 +280,8 @@ class MasterFallbackTestCase(TestCase):
         master_store.get(Person, 2)
         self.assertRaises(DisconnectionError, slave_store.get, Person, 2)
 
-    def test_startup_with_no_slave(self):
-        '''An attempt is made for the first time to connect to a slave.'''
+    def test_startup_with_no_standby(self):
+        '''An attempt is made for the first time to connect to a standby.'''
         self.pgbouncer_fixture.stop()
 
         master_store = IMasterStore(Person)
@@ -290,8 +290,8 @@ class MasterFallbackTestCase(TestCase):
         # The master and slave Stores are the same object.
         self.assertIs(master_store, slave_store)
 
-    def test_slave_shutdown_during_transaction(self):
-        '''Slave is shutdown while running, but we can recover.'''
+    def test_standby_shutdown_during_transaction(self):
+        '''Standby is shutdown while running, but we can recover.'''
         master_store = IMasterStore(Person)
         slave_store = ISlaveStore(Person)
 
@@ -313,8 +313,8 @@ class MasterFallbackTestCase(TestCase):
 
         self.assertIs(master_store, slave_store)
 
-    def test_slave_shutdown_between_transactions(self):
-        '''Slave is shutdown in between transactions.'''
+    def test_standby_shutdown_between_transactions(self):
+        '''Standby is shutdown in between transactions.'''
         master_store = IMasterStore(Person)
         slave_store = ISlaveStore(Person)
         self.assertIsNot(master_store, slave_store)
@@ -322,8 +322,8 @@ class MasterFallbackTestCase(TestCase):
         transaction.abort()
         self.pgbouncer_fixture.stop()
 
-        # The process doesn't notice the slave going down, and things
-        # will fail the next time the slave is used.
+        # The process doesn't notice the standby going down, and things
+        # will fail the next time the standby is used.
         master_store = IMasterStore(Person)
         slave_store = ISlaveStore(Person)
         self.assertIsNot(master_store, slave_store)
@@ -336,8 +336,8 @@ class MasterFallbackTestCase(TestCase):
         slave_store = ISlaveStore(Person)
         self.assertIs(master_store, slave_store)
 
-    def test_slave_reconnect_after_outage(self):
-        '''The slave is again used once it becomes available.'''
+    def test_standby_reconnect_after_outage(self):
+        '''The standby is again used once it becomes available.'''
         self.pgbouncer_fixture.stop()
 
         master_store = IMasterStore(Person)
@@ -391,54 +391,54 @@ class TestFastDowntimeRollout(TestCase):
         except DisconnectionError:
             return False
 
-    def store_is_slave(self, store):
-        return store.get_database().name == 'main-slave'
+    def store_is_standby(self, store):
+        return store.get_database().name == 'main-standby'
 
-    def store_is_master(self, store):
-        return not self.store_is_slave(store)
+    def store_is_primary(self, store):
+        return not self.store_is_standby(store)
 
-    def test_slave_only_fast_downtime_rollout(self):
-        '''You can always access a working slave store during fast downtime.
+    def test_standby_only_fast_downtime_rollout(self):
+        '''You can always access a working standby store during fast downtime.
         '''
         # Everything is running happily.
         store = ISlaveStore(Person)
         original_store = store
         self.assertTrue(self.store_is_working(store))
-        self.assertTrue(self.store_is_slave(store))
+        self.assertTrue(self.store_is_standby(store))
 
         # But fast downtime is about to happen.
 
-        # Replication is stopped on the slave, and lag starts
+        # Replication is stopped on the standby, and lag starts
         # increasing.
 
-        # All connections to the master are killed so database schema
+        # All connections to the primary are killed so database schema
         # updates can be applied.
         self.pgbouncer_cur.execute('DISABLE %s' % self.primary_dbname)
         self.pgbouncer_cur.execute('KILL %s' % self.primary_dbname)
 
-        # Of course, slave connections are unaffected.
+        # Of course, standby connections are unaffected.
         self.assertTrue(self.store_is_working(store))
 
-        # After schema updates have been made to the master, it is
+        # After schema updates have been made to the primary, it is
         # reenabled.
         self.pgbouncer_cur.execute('RESUME %s' % self.primary_dbname)
         self.pgbouncer_cur.execute('ENABLE %s' % self.primary_dbname)
 
-        # And the slaves taken down, and replication reenabled so the
+        # And the standbys taken down, and replication reenabled so the
         # schema updates can replicate.
         self.pgbouncer_cur.execute('DISABLE %s' % self.standby_dbname)
         self.pgbouncer_cur.execute('KILL %s' % self.standby_dbname)
 
-        # The next attempt at accessing the slave store will fail
+        # The next attempt at accessing the standby store will fail
         # with a DisconnectionError.
         self.assertRaises(DisconnectionError, store.execute, 'SELECT TRUE')
         transaction.abort()
 
         # But if we handle that and retry, we can continue.
         # Now the failed connection has been detected, the next Store
-        # we are handed is a master Store instead of a slave.
+        # we are handed is a primary Store instead of a standby.
         store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_master(store))
+        self.assertTrue(self.store_is_primary(store))
         self.assertIsNot(ISlaveStore(Person), original_store)
 
         # But alas, it might not work the first transaction. If it has
@@ -447,10 +447,10 @@ class TestFastDowntimeRollout(TestCase):
         self.assertFalse(self.store_is_working(store))
         transaction.abort()
 
-        # Next retry attempt, everything is fine using the master
-        # connection, even though our code only asked for a slave.
+        # Next retry attempt, everything is fine using the primary
+        # connection, even though our code only asked for a standby.
         store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_master(store))
+        self.assertTrue(self.store_is_primary(store))
         self.assertTrue(self.store_is_working(store))
 
         # The original Store is busted though. You cannot reuse Stores
@@ -459,85 +459,85 @@ class TestFastDowntimeRollout(TestCase):
         self.assertFalse(self.store_is_working(original_store))
         transaction.abort()
 
-        # Once replication has caught up, the slave is reenabled.
+        # Once replication has caught up, the standby is reenabled.
         self.pgbouncer_cur.execute('RESUME %s' % self.standby_dbname)
         self.pgbouncer_cur.execute('ENABLE %s' % self.standby_dbname)
 
         # And next transaction, we are back to normal.
         store = ISlaveStore(Person)
         self.assertTrue(self.store_is_working(store))
-        self.assertTrue(self.store_is_slave(store))
+        self.assertTrue(self.store_is_standby(store))
         self.assertIs(original_store, store)
 
-    def test_master_slave_fast_downtime_rollout(self):
+    def test_primary_standby_fast_downtime_rollout(self):
         '''Parts of your app can keep working during a fast downtime update.
         '''
         # Everything is running happily.
         master_store = IMasterStore(Person)
-        self.assertTrue(self.store_is_master(master_store))
+        self.assertTrue(self.store_is_primary(master_store))
         self.assertTrue(self.store_is_working(master_store))
 
         slave_store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_slave(slave_store))
+        self.assertTrue(self.store_is_standby(slave_store))
         self.assertTrue(self.store_is_working(slave_store))
 
         # But fast downtime is about to happen.
 
-        # Replication is stopped on the slave, and lag starts
+        # Replication is stopped on the standby, and lag starts
         # increasing.
 
-        # All connections to the master are killed so database schema
+        # All connections to the primary are killed so database schema
         # updates can be applied.
         self.pgbouncer_cur.execute('DISABLE %s' % self.primary_dbname)
         self.pgbouncer_cur.execute('KILL %s' % self.primary_dbname)
 
-        # Of course, slave connections are unaffected.
+        # Of course, standby connections are unaffected.
         self.assertTrue(self.store_is_working(slave_store))
 
-        # But attempts to use a master store will fail.
+        # But attempts to use a primary store will fail.
         self.assertFalse(self.store_is_working(master_store))
         transaction.abort()
 
-        # After schema updates have been made to the master, it is
+        # After schema updates have been made to the primary, it is
         # reenabled.
         self.pgbouncer_cur.execute('RESUME %s' % self.primary_dbname)
         self.pgbouncer_cur.execute('ENABLE %s' % self.primary_dbname)
 
-        # And the slaves taken down, and replication reenabled so the
+        # And the standbys taken down, and replication reenabled so the
         # schema updates can replicate.
         self.pgbouncer_cur.execute('DISABLE %s' % self.standby_dbname)
         self.pgbouncer_cur.execute('KILL %s' % self.standby_dbname)
 
-        # The master store is working again.
+        # The primary store is working again.
         master_store = IMasterStore(Person)
-        self.assertTrue(self.store_is_master(master_store))
+        self.assertTrue(self.store_is_primary(master_store))
         self.assertTrue(self.store_is_working(master_store))
 
-        # The next attempt at accessing the slave store will fail
+        # The next attempt at accessing the standby store will fail
         # with a DisconnectionError.
         slave_store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_slave(slave_store))
+        self.assertTrue(self.store_is_standby(slave_store))
         self.assertRaises(
             DisconnectionError, slave_store.execute, 'SELECT TRUE')
         transaction.abort()
 
         # But if we handle that and retry, we can continue.
         # Now the failed connection has been detected, the next Store
-        # we are handed is a master Store instead of a slave.
+        # we are handed is a primary Store instead of a standby.
         slave_store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_master(slave_store))
+        self.assertTrue(self.store_is_primary(slave_store))
         self.assertTrue(self.store_is_working(slave_store))
 
-        # Once replication has caught up, the slave is reenabled.
+        # Once replication has caught up, the standby is reenabled.
         self.pgbouncer_cur.execute('RESUME %s' % self.standby_dbname)
         self.pgbouncer_cur.execute('ENABLE %s' % self.standby_dbname)
 
         # And next transaction, we are back to normal.
         transaction.abort()
         master_store = IMasterStore(Person)
-        self.assertTrue(self.store_is_master(master_store))
+        self.assertTrue(self.store_is_primary(master_store))
         self.assertTrue(self.store_is_working(master_store))
 
         slave_store = ISlaveStore(Person)
-        self.assertTrue(self.store_is_slave(slave_store))
+        self.assertTrue(self.store_is_standby(slave_store))
         self.assertTrue(self.store_is_working(slave_store))