launchpad-reviewers team mailing list archive

[Merge] ~cjwatson/launchpad:rename-builder-interactor-slave into launchpad:master

 

Colin Watson has proposed merging ~cjwatson/launchpad:rename-builder-interactor-slave into launchpad:master with ~cjwatson/launchpad:rename-bfj-behaviour-slave as a prerequisite.

Commit message:
Rename "slave" in BuilderInteractor to "worker"

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~cjwatson/launchpad/+git/launchpad/+merge/414045
-- 
Your team Launchpad code reviewers is requested to review the proposed merge of ~cjwatson/launchpad:rename-builder-interactor-slave into launchpad:master.
diff --git a/lib/lp/buildmaster/interactor.py b/lib/lp/buildmaster/interactor.py
index 3aaae63..e8a1d7f 100644
--- a/lib/lp/buildmaster/interactor.py
+++ b/lib/lp/buildmaster/interactor.py
@@ -345,7 +345,7 @@ def extract_vitals_from_db(builder, build_queue=_BQ_UNSPECIFIED):
 class BuilderInteractor:
 
     @staticmethod
-    def makeSlaveFromVitals(vitals):
+    def makeWorkerFromVitals(vitals):
         if vitals.virtualized:
             timeout = config.builddmaster.virtualized_socket_timeout
         else:
@@ -354,19 +354,19 @@ class BuilderInteractor:
             vitals.url, vitals.vm_host, timeout)
 
     @staticmethod
-    def getBuildBehaviour(queue_item, builder, slave):
+    def getBuildBehaviour(queue_item, builder, worker):
         if queue_item is None:
             return None
         behaviour = IBuildFarmJobBehaviour(queue_item.specific_build)
-        behaviour.setBuilder(builder, slave)
+        behaviour.setBuilder(builder, worker)
         return behaviour
 
     @classmethod
-    def resumeSlaveHost(cls, vitals, slave):
-        """Resume the slave host to a known good condition.
+    def resumeWorkerHost(cls, vitals, worker):
+        """Resume the worker host to a known good condition.
 
         Issues 'builddmaster.vm_resume_command' specified in the configuration
-        to resume the slave.
+        to resume the worker.
 
         :raises: CannotResumeHost: if builder is not virtual or if the
             configuration command has failed.
@@ -384,7 +384,7 @@ class BuilderInteractor:
         logger = cls._getWorkerScannerLogger()
         logger.info("Resuming %s (%s)" % (vitals.name, vitals.url))
 
-        d = slave.resume()
+        d = worker.resume()
 
         def got_resume_ok(args):
             stdout, stderr, returncode = args
@@ -400,38 +400,38 @@ class BuilderInteractor:
 
     @classmethod
     @defer.inlineCallbacks
-    def cleanSlave(cls, vitals, slave, builder_factory):
-        """Prepare a slave for a new build.
+    def cleanWorker(cls, vitals, worker, builder_factory):
+        """Prepare a worker for a new build.
 
         :return: A Deferred that fires when this stage of the resume
-            operations finishes. If the value is True, the slave is now clean.
+            operations finishes. If the value is True, the worker is now clean.
             If it's False, the clean is still in progress and this must be
             called again later.
         """
         if vitals.virtualized:
             if vitals.vm_reset_protocol == BuilderResetProtocol.PROTO_1_1:
                 # In protocol 1.1 the reset trigger is synchronous, so
-                # once resumeSlaveHost returns the slave should be
+                # once resumeWorkerHost returns the worker should be
                 # running.
                 builder_factory[vitals.name].setCleanStatus(
                     BuilderCleanStatus.CLEANING)
                 transaction.commit()
-                yield cls.resumeSlaveHost(vitals, slave)
-                # We ping the resumed slave before we try to do anything
+                yield cls.resumeWorkerHost(vitals, worker)
+                # We ping the resumed worker before we try to do anything
                 # useful with it. This is to ensure it's accepting
                 # packets from the outside world, because testing has
                 # shown that the first packet will randomly fail for no
                 # apparent reason.  This could be a quirk of the Xen
                 # guest, we're not sure. See bug 586359.
-                yield slave.echo("ping")
+                yield worker.echo("ping")
                 return True
             elif vitals.vm_reset_protocol == BuilderResetProtocol.PROTO_2_0:
                 # In protocol 2.0 the reset trigger is asynchronous.
-                # If the trigger succeeds we'll leave the slave in
-                # CLEANING, and the non-LP slave management code will
+                # If the trigger succeeds we'll leave the worker in
+                # CLEANING, and the non-LP worker management code will
                 # set it back to CLEAN later using the webservice.
                 if vitals.clean_status == BuilderCleanStatus.DIRTY:
-                    yield cls.resumeSlaveHost(vitals, slave)
+                    yield cls.resumeWorkerHost(vitals, worker)
                     builder_factory[vitals.name].setCleanStatus(
                         BuilderCleanStatus.CLEANING)
                     transaction.commit()
@@ -441,28 +441,28 @@ class BuilderInteractor:
             raise CannotResumeHost(
                 "Invalid vm_reset_protocol: %r" % vitals.vm_reset_protocol)
         else:
-            worker_status = yield slave.status()
+            worker_status = yield worker.status()
             status = worker_status.get('builder_status', None)
             if status == 'BuilderStatus.IDLE':
                 # This is as clean as we can get it.
                 return True
             elif status == 'BuilderStatus.BUILDING':
-                # Asynchronously abort() the slave and wait until WAITING.
-                yield slave.abort()
+                # Asynchronously abort() the worker and wait until WAITING.
+                yield worker.abort()
                 return False
             elif status == 'BuilderStatus.ABORTING':
                 # Wait it out until WAITING.
                 return False
             elif status == 'BuilderStatus.WAITING':
                 # Just a synchronous clean() call and we'll be idle.
-                yield slave.clean()
+                yield worker.clean()
                 return True
             raise BuildDaemonError(
                 "Invalid status during clean: %r" % status)
 
     @classmethod
     @defer.inlineCallbacks
-    def _startBuild(cls, build_queue_item, vitals, builder, slave, behaviour,
+    def _startBuild(cls, build_queue_item, vitals, builder, worker, behaviour,
                     logger):
         """Start a build on this builder.
 
@@ -482,7 +482,7 @@ class BuilderInteractor:
 
         if builder.clean_status != BuilderCleanStatus.CLEAN:
             raise BuildDaemonIsolationError(
-                "Attempted to start build on a dirty slave.")
+                "Attempted to start build on a dirty worker.")
 
         builder.setCleanStatus(BuilderCleanStatus.DIRTY)
         transaction.commit()
@@ -491,8 +491,8 @@ class BuilderInteractor:
 
     @classmethod
     @defer.inlineCallbacks
-    def findAndStartJob(cls, vitals, builder, slave, builder_factory):
-        """Find a job to run and send it to the buildd slave.
+    def findAndStartJob(cls, vitals, builder, worker, builder_factory):
+        """Find a job to run and send it to the buildd worker.
 
         :return: A Deferred whose value is the `IBuildQueue` instance
             found or None if no job was found.
@@ -514,7 +514,7 @@ class BuilderInteractor:
             logger.debug("No build candidates available for builder.")
             return None
 
-        new_behaviour = cls.getBuildBehaviour(candidate, builder, slave)
+        new_behaviour = cls.getBuildBehaviour(candidate, builder, worker)
         needed_bfjb = type(removeSecurityProxy(
             IBuildFarmJobBehaviour(candidate.specific_build)))
         if not zope_isinstance(new_behaviour, needed_bfjb):
@@ -522,7 +522,7 @@ class BuilderInteractor:
                 "Inappropriate IBuildFarmJobBehaviour: %r is not a %r" %
                 (new_behaviour, needed_bfjb))
         yield cls._startBuild(
-            candidate, vitals, builder, slave, new_behaviour, logger)
+            candidate, vitals, builder, worker, new_behaviour, logger)
         return candidate
 
     @staticmethod
@@ -549,7 +549,7 @@ class BuilderInteractor:
         """
         builder_status = worker_status["builder_status"]
         if builder_status == "BuilderStatus.ABORTING":
-            logtail = "Waiting for slave process to be terminated"
+            logtail = "Waiting for worker process to be terminated"
         elif worker_status.get("logtail") is not None:
             # worker_status["logtail"] is an xmlrpc.client.Binary instance,
             # and the contents might include invalid UTF-8 due to being a
@@ -567,16 +567,16 @@ class BuilderInteractor:
 
     @classmethod
     @defer.inlineCallbacks
-    def updateBuild(cls, vitals, slave, worker_status, builder_factory,
+    def updateBuild(cls, vitals, worker, worker_status, builder_factory,
                     behaviour_factory, manager):
         """Verify the current build job status.
 
         Perform the required actions for each state.
 
-        :return: A Deferred that fires when the slave dialog is finished.
+        :return: A Deferred that fires when the worker dialog is finished.
         """
         # IDLE is deliberately not handled here, because it should be
-        # impossible to get past the cookie check unless the slave
+        # impossible to get past the cookie check unless the worker
         # matches the DB, and this method isn't called unless the DB
         # says there's a job.
         builder_status = worker_status['builder_status']
@@ -592,7 +592,7 @@ class BuilderInteractor:
         elif builder_status == 'BuilderStatus.WAITING':
             # Build has finished. Delegate handling to the build itself.
             builder = builder_factory[vitals.name]
-            behaviour = behaviour_factory(vitals.build_queue, builder, slave)
+            behaviour = behaviour_factory(vitals.build_queue, builder, worker)
             yield behaviour.handleStatus(
                 vitals.build_queue, cls.extractBuildStatus(worker_status),
                 worker_status)
diff --git a/lib/lp/buildmaster/manager.py b/lib/lp/buildmaster/manager.py
index ba9c3f1..850b83b 100644
--- a/lib/lp/buildmaster/manager.py
+++ b/lib/lp/buildmaster/manager.py
@@ -431,7 +431,7 @@ class WorkerScanner:
 
     def __init__(self, builder_name, builder_factory, manager, logger,
                  clock=None, interactor_factory=BuilderInteractor,
-                 worker_factory=BuilderInteractor.makeSlaveFromVitals,
+                 worker_factory=BuilderInteractor.makeWorkerFromVitals,
                  behaviour_factory=BuilderInteractor.getBuildBehaviour):
         self.builder_name = builder_name
         self.builder_factory = builder_factory
@@ -681,11 +681,11 @@ class WorkerScanner:
                     builder.resetFailureCount()
                     transaction.commit()
             else:
-                # Ask the BuilderInteractor to clean the slave. It might
+                # Ask the BuilderInteractor to clean the worker. It might
                 # be immediately cleaned on return, in which case we go
                 # straight back to CLEAN, or we might have to spin
                 # through another few cycles.
-                done = yield interactor.cleanSlave(
+                done = yield interactor.cleanWorker(
                     vitals, slave, self.builder_factory)
                 if done:
                     builder = self.builder_factory[self.builder_name]
diff --git a/lib/lp/buildmaster/tests/test_interactor.py b/lib/lp/buildmaster/tests/test_interactor.py
index fc102be..6e4a790 100644
--- a/lib/lp/buildmaster/tests/test_interactor.py
+++ b/lib/lp/buildmaster/tests/test_interactor.py
@@ -123,7 +123,7 @@ class TestBuilderInteractor(TestCase):
 
     def test_extractBuildStatus_baseline(self):
         # extractBuildStatus picks the name of the build status out of a
-        # dict describing the slave's status.
+        # dict describing the worker's status.
         worker_status = {'build_status': 'BuildStatus.BUILDING'}
         self.assertEqual(
             'BUILDING', BuilderInteractor.extractBuildStatus(worker_status))
@@ -136,20 +136,20 @@ class TestBuilderInteractor(TestCase):
             AssertionError, BuilderInteractor.extractBuildStatus,
             worker_status)
 
-    def resumeSlaveHost(self, builder):
+    def resumeWorkerHost(self, builder):
         vitals = extract_vitals_from_db(builder)
-        return BuilderInteractor.resumeSlaveHost(
-            vitals, BuilderInteractor.makeSlaveFromVitals(vitals))
+        return BuilderInteractor.resumeWorkerHost(
+            vitals, BuilderInteractor.makeWorkerFromVitals(vitals))
 
-    def test_resumeSlaveHost_nonvirtual(self):
-        d = self.resumeSlaveHost(MockBuilder(virtualized=False))
+    def test_resumeWorkerHost_nonvirtual(self):
+        d = self.resumeWorkerHost(MockBuilder(virtualized=False))
         return assert_fails_with(d, CannotResumeHost)
 
-    def test_resumeSlaveHost_no_vmhost(self):
-        d = self.resumeSlaveHost(MockBuilder(virtualized=False, vm_host=None))
+    def test_resumeWorkerHost_no_vmhost(self):
+        d = self.resumeWorkerHost(MockBuilder(virtualized=False, vm_host=None))
         return assert_fails_with(d, CannotResumeHost)
 
-    def test_resumeSlaveHost_success(self):
+    def test_resumeWorkerHost_success(self):
         reset_config = """
             [builddmaster]
             vm_resume_command: /bin/echo -n snap %(buildd_name)s %(vm_host)s
@@ -157,45 +157,45 @@ class TestBuilderInteractor(TestCase):
         config.push('reset', reset_config)
         self.addCleanup(config.pop, 'reset')
 
-        d = self.resumeSlaveHost(MockBuilder(
+        d = self.resumeWorkerHost(MockBuilder(
             url="http://crackle.ppa/";, virtualized=True, vm_host="pop"))
 
         def got_resume(output):
             self.assertEqual((b'snap crackle pop', b''), output)
         return d.addCallback(got_resume)
 
-    def test_resumeSlaveHost_command_failed(self):
+    def test_resumeWorkerHost_command_failed(self):
         reset_fail_config = """
             [builddmaster]
             vm_resume_command: /bin/false"""
         config.push('reset fail', reset_fail_config)
         self.addCleanup(config.pop, 'reset fail')
-        d = self.resumeSlaveHost(MockBuilder(virtualized=True, vm_host="pop"))
+        d = self.resumeWorkerHost(MockBuilder(virtualized=True, vm_host="pop"))
         return assert_fails_with(d, CannotResumeHost)
 
-    def test_makeSlaveFromVitals(self):
-        # Builder.slave is a BuilderWorker that points at the actual Builder.
-        # The Builder is only ever used in scripts that run outside of the
-        # security context.
+    def test_makeWorkerFromVitals(self):
+        # BuilderInteractor.makeWorkerFromVitals returns a BuilderWorker
+        # that points at the actual Builder.  The Builder is only ever used
+        # in scripts that run outside of the security context.
         builder = MockBuilder(virtualized=False)
         vitals = extract_vitals_from_db(builder)
-        slave = BuilderInteractor.makeSlaveFromVitals(vitals)
-        self.assertEqual(builder.url, slave.url)
-        self.assertEqual(10, slave.timeout)
+        worker = BuilderInteractor.makeWorkerFromVitals(vitals)
+        self.assertEqual(builder.url, worker.url)
+        self.assertEqual(10, worker.timeout)
 
         builder = MockBuilder(virtualized=True)
         vitals = extract_vitals_from_db(builder)
-        slave = BuilderInteractor.makeSlaveFromVitals(vitals)
-        self.assertEqual(5, slave.timeout)
+        worker = BuilderInteractor.makeWorkerFromVitals(vitals)
+        self.assertEqual(5, worker.timeout)
 
 
-class TestBuilderInteractorCleanSlave(TestCase):
+class TestBuilderInteractorCleanWorker(TestCase):
 
     run_tests_with = AsynchronousDeferredRunTest
 
     @defer.inlineCallbacks
     def assertCleanCalls(self, builder, worker, calls, done):
-        actually_done = yield BuilderInteractor.cleanSlave(
+        actually_done = yield BuilderInteractor.cleanWorker(
             extract_vitals_from_db(builder), worker,
             MockBuilderFactory(builder, None))
         self.assertEqual(done, actually_done)
@@ -243,7 +243,7 @@ class TestBuilderInteractorCleanSlave(TestCase):
         builder.vm_reset_protocol = None
         with ExpectedException(
                 CannotResumeHost, "Invalid vm_reset_protocol: None"):
-            yield BuilderInteractor.cleanSlave(
+            yield BuilderInteractor.cleanWorker(
                 extract_vitals_from_db(builder), OkWorker(),
                 MockBuilderFactory(builder, None))
 
@@ -290,7 +290,7 @@ class TestBuilderInteractorCleanSlave(TestCase):
         vitals = extract_vitals_from_db(builder)
         worker = LostBuildingBrokenWorker()
         try:
-            yield BuilderInteractor.cleanSlave(
+            yield BuilderInteractor.cleanWorker(
                 vitals, worker, MockBuilderFactory(builder, None))
         except xmlrpc.client.Fault:
             self.assertEqual(['status', 'abort'], worker.call_log)
@@ -470,8 +470,8 @@ class TestBuilderInteractorDB(TestCaseWithFactory):
         return d.addCallback(check_build_started)
 
     @defer.inlineCallbacks
-    def test_findAndStartJob_requires_clean_slave(self):
-        # findAndStartJob ensures that its slave starts CLEAN.
+    def test_findAndStartJob_requires_clean_worker(self):
+        # findAndStartJob ensures that its worker starts CLEAN.
         builder, build = self._setupBinaryBuildAndBuilder()
         builder.setCleanStatus(BuilderCleanStatus.DIRTY)
         candidate = build.queueBuild()
@@ -480,12 +480,12 @@ class TestBuilderInteractorDB(TestCaseWithFactory):
         vitals = extract_vitals_from_db(builder)
         with ExpectedException(
                 BuildDaemonIsolationError,
-                "Attempted to start build on a dirty slave."):
+                "Attempted to start build on a dirty worker."):
             yield BuilderInteractor.findAndStartJob(
                 vitals, builder, OkWorker(), builder_factory)
 
     @defer.inlineCallbacks
-    def test_findAndStartJob_dirties_slave(self):
+    def test_findAndStartJob_dirties_worker(self):
         # findAndStartJob marks its builder DIRTY before dispatching.
         builder, build = self._setupBinaryBuildAndBuilder()
         candidate = build.queueBuild()
diff --git a/lib/lp/soyuz/tests/test_binarypackagebuildbehaviour.py b/lib/lp/soyuz/tests/test_binarypackagebuildbehaviour.py
index 89f8474..73bff8d 100644
--- a/lib/lp/soyuz/tests/test_binarypackagebuildbehaviour.py
+++ b/lib/lp/soyuz/tests/test_binarypackagebuildbehaviour.py
@@ -615,7 +615,7 @@ class TestBinaryBuildPackageBehaviourBuildCollection(TestCaseWithFactory):
         # The builder is in the process of aborting.
         def got_update(ignored):
             self.assertEqual(
-                "Waiting for slave process to be terminated",
+                "Waiting for worker process to be terminated",
                 self.candidate.logtail)
 
         d = self.updateBuild(self.candidate, AbortingWorker())
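
For reviewers skimming the rename, here is a minimal sketch of how the renamed BuilderInteractor entry points fit together, loosely following WorkerScanner in lib/lp/buildmaster/manager.py. The dispatch_once helper and its import list are illustrative assumptions and not part of this branch; only method names, signatures, and helpers visible in the diff above are relied on.

# Illustrative sketch only (assumption: run inside the Launchpad tree, with
# `builder` and `builder_factory` supplied by the caller, e.g. the scanner).
import transaction
from twisted.internet import defer

from lp.buildmaster.enums import BuilderCleanStatus
from lp.buildmaster.interactor import (
    BuilderInteractor,
    extract_vitals_from_db,
    )


@defer.inlineCallbacks
def dispatch_once(builder, builder_factory):
    vitals = extract_vitals_from_db(builder)
    # makeSlaveFromVitals -> makeWorkerFromVitals: build the proxy used to
    # talk to the builder, with the (non-)virtualized socket timeout.
    worker = BuilderInteractor.makeWorkerFromVitals(vitals)
    if builder.clean_status != BuilderCleanStatus.CLEAN:
        # cleanSlave -> cleanWorker: fires with True once the worker is
        # clean, or False if cleaning is still in progress and must be
        # polled again on a later pass.
        done = yield BuilderInteractor.cleanWorker(
            vitals, worker, builder_factory)
        if not done:
            return None
        builder.setCleanStatus(BuilderCleanStatus.CLEAN)
        transaction.commit()
    # findAndStartJob keeps its signature, with the former `slave`
    # parameter now called `worker`; it returns the dispatched BuildQueue
    # item, or None if no candidate was found.
    candidate = yield BuilderInteractor.findAndStartJob(
        vitals, builder, worker, builder_factory)
    return candidate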