launchpad-reviewers team mailing list archive
-
launchpad-reviewers team
-
Mailing list archive
-
Message #15797
[Merge] lp:~wgrant/launchpad/separate-builder-twisted into lp:launchpad
William Grant has proposed merging lp:~wgrant/launchpad/separate-builder-twisted into lp:launchpad.
Requested reviews:
William Grant (wgrant)
For more details, see:
https://code.launchpad.net/~wgrant/launchpad/separate-builder-twisted/+merge/181439
The Builder DB object has lots of Twisted methods on it. This makes interfaces unclear, tightens coupling, and runs the risk of someone doing something stupid outside buildd-manager. This branch splits the Twisted stuff out into a separate class, BuilderInteractor, that encapsulates interactions involving a Builder and a BuilderSlave.
It's a bit ugly at the moment, but cleanup is coming.
--
https://code.launchpad.net/~wgrant/launchpad/separate-builder-twisted/+merge/181439
Your team Launchpad code reviewers is subscribed to branch lp:launchpad.
=== modified file 'lib/lp/buildmaster/interfaces/builder.py'
--- lib/lp/buildmaster/interfaces/builder.py 2013-08-06 15:28:14 +0000
+++ lib/lp/buildmaster/interfaces/builder.py 2013-08-22 01:38:28 +0000
@@ -163,8 +163,6 @@
title=_('Publicly Visible'), required=True, default=True,
description=_('Whether or not to present the builder publicly.')))
- slave = Attribute("xmlrpclib.Server instance corresponding to builder.")
-
currentjob = Attribute("BuildQueue instance for job being processed.")
failure_count = exported(Int(
@@ -184,24 +182,6 @@
def failBuilder(reason):
"""Mark builder as failed for a given reason."""
- def verifySlaveBuildCookie(slave_build_id):
- """Verify that a slave's build cookie is consistent.
-
- This should delegate to the current `IBuildFarmJobBehavior`.
- """
-
- def transferSlaveFileToLibrarian(file_sha1, filename, private):
- """Transfer a file from the slave to the librarian.
-
- :param file_sha1: The file's sha1, which is how the file is addressed
- in the slave XMLRPC protocol. Specially, the file_sha1 'buildlog'
- will cause the build log to be retrieved and gzipped.
- :param filename: The name of the file to be given to the librarian
- file alias.
- :param private: True if the build is for a private archive.
- :return: A Deferred that calls back with a librarian file alias.
- """
-
def getBuildQueue():
"""Return a `BuildQueue` if there's an active job on this builder.
@@ -211,102 +191,20 @@
def getCurrentBuildFarmJob():
"""Return a `BuildFarmJob` for this builder."""
- # All methods below here return Deferred.
-
- def isAvailable():
- """Whether or not a builder is available for building new jobs.
-
- :return: A Deferred that fires with True or False, depending on
- whether the builder is available or not.
- """
-
- def rescueIfLost(logger=None):
- """Reset the slave if its job information doesn't match the DB.
-
- This checks the build ID reported in the slave status against the
- database. If it isn't building what we think it should be, the current
- build will be aborted and the slave cleaned in preparation for a new
- task. The decision about the slave's correctness is left up to
- `IBuildFarmJobBehavior.verifySlaveBuildCookie`.
-
- :return: A Deferred that fires when the dialog with the slave is
- finished. It does not have a return value.
- """
-
- def updateStatus(logger=None):
- """Update the builder's status by probing it.
-
- :return: A Deferred that fires when the dialog with the slave is
- finished. It does not have a return value.
- """
-
- def cleanSlave():
- """Clean any temporary files from the slave.
-
- :return: A Deferred that fires when the dialog with the slave is
- finished. It does not have a return value.
- """
-
- def requestAbort():
- """Ask that a build be aborted.
-
- This takes place asynchronously: Actually killing everything running
- can take some time so the slave status should be queried again to
- detect when the abort has taken effect. (Look for status ABORTED).
-
- :return: A Deferred that fires when the dialog with the slave is
- finished. It does not have a return value.
- """
-
- def resumeSlaveHost():
- """Resume the slave host to a known good condition.
-
- Issues 'builddmaster.vm_resume_command' specified in the configuration
- to resume the slave.
-
- :raises: CannotResumeHost: if builder is not virtual or if the
- configuration command has failed.
-
- :return: A Deferred that fires when the resume operation finishes,
- whose value is a (stdout, stderr) tuple for success, or a Failure
- whose value is a CannotResumeHost exception.
- """
-
- def slaveStatus():
- """Get the slave status for this builder.
-
- :return: A Deferred which fires when the slave dialog is complete.
- Its value is a dict containing at least builder_status, but
- potentially other values included by the current build
- behavior.
- """
-
- def slaveStatusSentence():
- """Get the slave status sentence for this builder.
-
- :return: A Deferred which fires when the slave dialog is complete.
- Its value is a tuple with the first element containing the
- slave status, build_id-queue-id and then optionally more
- elements depending on the status.
- """
-
- def updateBuild(queueItem):
- """Verify the current build job status.
-
- Perform the required actions for each state.
-
- :return: A Deferred that fires when the slave dialog is finished.
- """
-
- def startBuild(build_queue_item, logger):
- """Start a build on this builder.
-
- :param build_queue_item: A BuildQueueItem to build.
- :param logger: A logger to be used to log diagnostic information.
-
- :return: A Deferred that fires after the dispatch has completed whose
- value is None, or a Failure that contains an exception
- explaining what went wrong.
+ def acquireBuildCandidate():
+ """Acquire a build candidate in an atomic fashion.
+
+ When retrieving a candidate we need to mark it as building
+ immediately so that it is not dispatched by another builder in the
+ build manager.
+
+ We can consider this to be atomic because although the build manager
+ is a Twisted app and gives the appearance of doing lots of things at
+ once, it's still single-threaded so no more than one builder scan
+ can be in this code at the same time.
+
+ If there's ever more than one build manager running at once, then
+ this code will need some sort of mutex.
"""
def handleFailure(logger):
@@ -315,32 +213,6 @@
Increment builder and (if possible) job failure counts.
"""
- def resetOrFail(logger, exception):
- """Handle "confirmed" build slave failures.
-
- Call this when there have been multiple failures that are not just
- the fault of failing jobs, or when the builder has entered an
- ABORTED state without having been asked to do so.
-
- In case of a virtualized/PPA buildd slave an attempt will be made
- to reset it (using `resumeSlaveHost`).
-
- Conversely, a non-virtualized buildd slave will be (marked as)
- failed straightaway (using `failBuilder`).
-
- :param logger: The logger object to be used for logging.
- :param exception: An exception to be used for logging.
- :return: A Deferred that fires after the virtual slave was resumed
- or immediately if it's a non-virtual slave.
- """
-
- def findAndStartJob():
- """Find a job to run and send it to the buildd slave.
-
- :return: A Deferred whose value is the `IBuildQueue` instance
- found or None if no job was found.
- """
-
class IBuilderSet(Interface):
"""Collections of builders.
=== modified file 'lib/lp/buildmaster/interfaces/buildfarmjobbehavior.py'
--- lib/lp/buildmaster/interfaces/buildfarmjobbehavior.py 2013-01-07 02:40:55 +0000
+++ lib/lp/buildmaster/interfaces/buildfarmjobbehavior.py 2013-08-22 01:38:28 +0000
@@ -22,8 +22,8 @@
class IBuildFarmJobBehavior(Interface):
- def setBuilder(builder):
- """Sets the associated builder reference for this instance."""
+ def setBuilderInteractor(interactor):
+ """Sets the associated `BuilderInteractor` for this instance."""
def logStartBuild(logger):
"""Log the start of a specific build queue item.
=== modified file 'lib/lp/buildmaster/manager.py'
--- lib/lp/buildmaster/manager.py 2013-08-06 15:28:14 +0000
+++ lib/lp/buildmaster/manager.py 2013-08-22 01:38:28 +0000
@@ -33,7 +33,10 @@
from lp.buildmaster.interfaces.buildfarmjobbehavior import (
BuildBehaviorMismatch,
)
-from lp.buildmaster.model.builder import Builder
+from lp.buildmaster.model.builder import (
+ Builder,
+ BuilderInteractor,
+ )
from lp.services.propertycache import get_property_cache
@@ -197,7 +200,7 @@
self.logger.info("Cancelling build '%s'" % build.title)
buildqueue.cancel()
transaction.commit()
- d = builder.resumeSlaveHost()
+ d = self.interactor.resumeSlaveHost()
d.addCallback(resume_done)
return d
@@ -228,6 +231,7 @@
# Storm store is invalidated over transaction boundaries.
self.builder = get_builder(self.builder_name)
+ self.interactor = BuilderInteractor(self.builder)
def status_updated(ignored):
# Commit the changes done while possibly rescuing jobs, to
@@ -240,7 +244,7 @@
# Scan the slave and get the logtail, or collect the build if
# it's ready. Yes, "updateBuild" is a bad name.
if buildqueue is not None:
- return self.builder.updateBuild(buildqueue)
+ return self.interactor.updateBuild(buildqueue)
def build_updated(ignored):
# Commit changes done while updating the build, to avoid
@@ -261,7 +265,7 @@
# the build thusly forcing it to get re-dispatched to another
# builder.
- return self.builder.isAvailable().addCallback(got_available)
+ return self.interactor.isAvailable().addCallback(got_available)
def got_available(available):
if not available:
@@ -276,7 +280,7 @@
# See if there is a job we can dispatch to the builder slave.
- d = self.builder.findAndStartJob()
+ d = self.interactor.findAndStartJob()
def job_started(candidate):
if self.builder.currentjob is not None:
@@ -284,7 +288,7 @@
# failure_count.
self.builder.resetFailureCount()
transaction.commit()
- return self.builder.slave
+ return self.interactor.slave
else:
return None
return d.addCallback(job_started)
@@ -292,7 +296,7 @@
def cancellation_checked(cancelled):
if cancelled:
return defer.succeed(None)
- d = self.builder.updateStatus(self.logger)
+ d = self.interactor.updateStatus(self.logger)
d.addCallback(status_updated)
d.addCallback(build_updated)
return d
=== modified file 'lib/lp/buildmaster/model/builder.py'
--- lib/lp/buildmaster/model/builder.py 2013-08-06 15:28:14 +0000
+++ lib/lp/buildmaster/model/builder.py 2013-08-22 01:38:28 +0000
@@ -5,6 +5,7 @@
__all__ = [
'Builder',
+ 'BuilderInteractor',
'BuilderSet',
'ProxyWithConnectionTimeout',
'rescueBuilderIfLost',
@@ -41,6 +42,7 @@
from twisted.web.client import downloadPage
from zope.component import getUtility
from zope.interface import implements
+from zope.security.proxy import removeSecurityProxy
from lp.app.errors import NotFoundError
from lp.buildmaster.interfaces.builder import (
@@ -305,7 +307,7 @@
# This is a separate function since MockBuilder needs to use it too.
# Do not use it -- (Mock)Builder.rescueIfLost should be used instead.
-def rescueBuilderIfLost(builder, logger=None):
+def rescueBuilderIfLost(interactor, logger=None):
"""See `IBuilder`."""
# 'ident_position' dict relates the position of the job identifier
# token in the sentence received from status(), according to the
@@ -316,7 +318,7 @@
'BuilderStatus.WAITING': 2
}
- d = builder.slaveStatusSentence()
+ d = interactor.slaveStatusSentence()
def got_status(status_sentence):
"""After we get the status, clean if we have to.
@@ -334,22 +336,23 @@
# This situation is usually caused by a temporary loss of
# communications with the slave and the build manager had to reset
# the job.
- if status == 'BuilderStatus.ABORTED' and builder.currentjob is None:
- if not builder.virtualized:
+ if (status == 'BuilderStatus.ABORTED'
+ and interactor.builder.currentjob is None):
+ if not interactor.builder.virtualized:
# We can't reset non-virtual builders reliably as the
# abort() function doesn't kill the actual build job,
# only the sbuild process! All we can do here is fail
# the builder with a message indicating the problem and
# wait for an admin to reboot it.
- builder.failBuilder(
+ interactor.builder.failBuilder(
"Non-virtual builder in ABORTED state, requires admin to "
"restart")
return "dummy status"
if logger is not None:
logger.info(
"Builder '%s' being cleaned up from ABORTED" %
- (builder.name,))
- d = builder.cleanSlave()
+ (interactor.builder.name,))
+ d = interactor.cleanSlave()
return d.addCallback(lambda ignored: status_sentence)
else:
return status_sentence
@@ -361,18 +364,18 @@
return
slave_build_id = status_sentence[ident_position[status]]
try:
- builder.verifySlaveBuildCookie(slave_build_id)
+ interactor.verifySlaveBuildCookie(slave_build_id)
except CorruptBuildCookie as reason:
if status == 'BuilderStatus.WAITING':
- d = builder.cleanSlave()
+ d = interactor.cleanSlave()
else:
- d = builder.requestAbort()
+ d = interactor.requestAbort()
def log_rescue(ignored):
if logger:
logger.info(
"Builder '%s' rescued from '%s': '%s'" %
- (builder.name, slave_build_id, reason))
+ (interactor.builder.name, slave_build_id, reason))
return d.addCallback(log_rescue)
d.addCallback(got_status)
@@ -380,12 +383,364 @@
return d
-def updateBuilderStatus(builder, logger=None):
+def updateBuilderStatus(interactor, logger=None):
"""See `IBuilder`."""
if logger:
- logger.debug('Checking %s' % builder.name)
-
- return builder.rescueIfLost(logger)
+ logger.debug('Checking %s' % interactor.builder.name)
+
+ return interactor.rescueIfLost(logger)
+
+
+class BuilderInteractor(object):
+
+ def __init__(self, builder, override_slave=None):
+ self.builder = builder
+ self._slave = override_slave
+
+ @cachedproperty
+ def slave(self):
+ """See IBuilder."""
+ if self._slave is not None:
+ return self._slave
+ if self.builder.virtualized:
+ timeout = config.builddmaster.virtualized_socket_timeout
+ else:
+ timeout = config.builddmaster.socket_timeout
+ return BuilderSlave.makeBuilderSlave(
+ self.builder.url, self.builder.vm_host, timeout)
+
+ @property
+ def current_build_behavior(self):
+ return removeSecurityProxy(self.builder.current_build_behavior)
+
+ def slaveStatus(self):
+ """Get the slave status for this builder.
+
+ :return: A Deferred which fires when the slave dialog is complete.
+ Its value is a dict containing at least builder_status, but
+ potentially other values included by the current build
+ behavior.
+ """
+ d = self.slave.status()
+
+ def got_status(status_sentence):
+ status = {'builder_status': status_sentence[0]}
+
+ # Extract detailed status and log information if present.
+ # Although build_id is also easily extractable here, there is no
+ # valid reason for anything to use it, so we exclude it.
+ if status['builder_status'] == 'BuilderStatus.WAITING':
+ status['build_status'] = status_sentence[1]
+ else:
+ if status['builder_status'] == 'BuilderStatus.BUILDING':
+ status['logtail'] = status_sentence[2]
+
+ self.current_build_behavior.updateSlaveStatus(
+ status_sentence, status)
+ return status
+
+ return d.addCallback(got_status)
+
+ def slaveStatusSentence(self):
+ """Get the slave status sentence for this builder.
+
+ :return: A Deferred which fires when the slave dialog is complete.
+ Its value is a tuple with the first element containing the
+ slave status, build_id-queue-id and then optionally more
+ elements depending on the status.
+ """
+ return self.slave.status()
+
+ def verifySlaveBuildCookie(self, slave_build_id):
+ """Verify that a slave's build cookie is consistent.
+
+ This should delegate to the current `IBuildFarmJobBehavior`.
+ """
+ return self.current_build_behavior.verifySlaveBuildCookie(
+ slave_build_id)
+
+ def isAvailable(self):
+ """Whether or not a builder is available for building new jobs.
+
+ :return: A Deferred that fires with True or False, depending on
+ whether the builder is available or not.
+ """
+ if not self.builder.builderok:
+ return defer.succeed(False)
+ d = self.slaveStatusSentence()
+
+ def catch_fault(failure):
+ failure.trap(xmlrpclib.Fault, socket.error)
+ return False
+
+ def check_available(status):
+ return status[0] == 'BuilderStatus.IDLE'
+ return d.addCallbacks(check_available, catch_fault)
+
+ def rescueIfLost(self, logger=None):
+ """Reset the slave if its job information doesn't match the DB.
+
+ This checks the build ID reported in the slave status against the
+ database. If it isn't building what we think it should be, the current
+ build will be aborted and the slave cleaned in preparation for a new
+ task. The decision about the slave's correctness is left up to
+ `IBuildFarmJobBehavior.verifySlaveBuildCookie`.
+
+ :return: A Deferred that fires when the dialog with the slave is
+ finished. It does not have a return value.
+ """
+ return rescueBuilderIfLost(self, logger)
+
+ def updateStatus(self, logger=None):
+ """Update the builder's status by probing it.
+
+ :return: A Deferred that fires when the dialog with the slave is
+ finished. It does not have a return value.
+ """
+ return updateBuilderStatus(self, logger)
+
+ def cleanSlave(self):
+ """Clean any temporary files from the slave.
+
+ :return: A Deferred that fires when the dialog with the slave is
+ finished. It does not have a return value.
+ """
+ return self.slave.clean()
+
+ def requestAbort(self):
+ """Ask that a build be aborted.
+
+ This takes place asynchronously: Actually killing everything running
+ can take some time so the slave status should be queried again to
+ detect when the abort has taken effect. (Look for status ABORTED).
+
+ :return: A Deferred that fires when the dialog with the slave is
+ finished. It does not have a return value.
+ """
+ return self.slave.abort()
+
+ def resumeSlaveHost(self):
+ """Resume the slave host to a known good condition.
+
+ Issues 'builddmaster.vm_resume_command' specified in the configuration
+ to resume the slave.
+
+ :raises: CannotResumeHost: if builder is not virtual or if the
+ configuration command has failed.
+
+ :return: A Deferred that fires when the resume operation finishes,
+ whose value is a (stdout, stderr) tuple for success, or a Failure
+ whose value is a CannotResumeHost exception.
+ """
+ if not self.builder.virtualized:
+ return defer.fail(CannotResumeHost('Builder is not virtualized.'))
+
+ if not self.builder.vm_host:
+ return defer.fail(CannotResumeHost('Undefined vm_host.'))
+
+ logger = self._getSlaveScannerLogger()
+ logger.info("Resuming %s (%s)" % (self.builder.name, self.builder.url))
+
+ d = self.slave.resume()
+
+ def got_resume_ok((stdout, stderr, returncode)):
+ return stdout, stderr
+
+ def got_resume_bad(failure):
+ stdout, stderr, code = failure.value
+ raise CannotResumeHost(
+ "Resuming failed:\nOUT:\n%s\nERR:\n%s\n" % (stdout, stderr))
+
+ return d.addCallback(got_resume_ok).addErrback(got_resume_bad)
+
+ def startBuild(self, build_queue_item, logger):
+ """Start a build on this builder.
+
+ :param build_queue_item: A BuildQueueItem to build.
+ :param logger: A logger to be used to log diagnostic information.
+
+ :return: A Deferred that fires after the dispatch has completed whose
+ value is None, or a Failure that contains an exception
+ explaining what went wrong.
+ """
+ removeSecurityProxy(self.builder).current_build_behavior = (
+ build_queue_item.required_build_behavior)
+ self.current_build_behavior.logStartBuild(logger)
+
+ # Make sure the request is valid; an exception is raised if it's not.
+ self.current_build_behavior.verifyBuildRequest(logger)
+
+ # Set the build behavior depending on the provided build queue item.
+ if not self.builder.builderok:
+ raise BuildDaemonError(
+ "Attempted to start a build on a known-bad builder.")
+
+ # If we are building a virtual build, resume the virtual machine.
+ if self.builder.virtualized:
+ d = self.resumeSlaveHost()
+ else:
+ d = defer.succeed(None)
+
+ def ping_done(ignored):
+ return self.current_build_behavior.dispatchBuildToSlave(
+ build_queue_item.id, logger)
+
+ def resume_done(ignored):
+ # Before we try and contact the resumed slave, we're going
+ # to send it a message. This is to ensure it's accepting
+ # packets from the outside world, because testing has shown
+ # that the first packet will randomly fail for no apparent
+ # reason. This could be a quirk of the Xen guest, we're not
+ # sure. We also don't care about the result from this message,
+ # just that it's sent, hence the "addBoth".
+ # See bug 586359.
+ if self.builder.virtualized:
+ d = self.slave.echo("ping")
+ else:
+ d = defer.succeed(None)
+ d.addBoth(ping_done)
+ return d
+
+ d.addCallback(resume_done)
+ return d
+
+ def _dispatchBuildCandidate(self, candidate):
+ """Dispatch the pending job to the associated buildd slave.
+
+ This method can only be executed in the builddmaster machine, since
+ it will actually issue the XMLRPC call to the buildd-slave.
+
+ :param candidate: The job to dispatch.
+ """
+ logger = self._getSlaveScannerLogger()
+ # Using maybeDeferred ensures that any exceptions are also
+ # wrapped up and caught later.
+ d = defer.maybeDeferred(self.startBuild, candidate, logger)
+ return d
+
+ def resetOrFail(self, logger, exception):
+ """Handle "confirmed" build slave failures.
+
+ Call this when there have been multiple failures that are not just
+ the fault of failing jobs, or when the builder has entered an
+ ABORTED state without having been asked to do so.
+
+ In case of a virtualized/PPA buildd slave an attempt will be made
+ to reset it (using `resumeSlaveHost`).
+
+ Conversely, a non-virtualized buildd slave will be (marked as)
+ failed straightaway (using `failBuilder`).
+
+ :param logger: The logger object to be used for logging.
+ :param exception: An exception to be used for logging.
+ :return: A Deferred that fires after the virtual slave was resumed
+ or immediately if it's a non-virtual slave.
+ """
+ error_message = str(exception)
+ if self.builder.virtualized:
+ # Virtualized/PPA builder: attempt a reset, unless the failure
+ # was itself a failure to reset. (In that case, the slave
+ # scanner will try again until we reach the failure threshold.)
+ if not isinstance(exception, CannotResumeHost):
+ logger.warn(
+ "Resetting builder: %s -- %s" % (
+ self.builder.url, error_message),
+ exc_info=True)
+ return self.resumeSlaveHost()
+ else:
+ # XXX: This should really let the failure bubble up to the
+ # scan() method that does the failure counting.
+ # Mark builder as 'failed'.
+ logger.warn(
+ "Disabling builder: %s -- %s" % (
+ self.builder.url, error_message))
+ self.builder.failBuilder(error_message)
+ return defer.succeed(None)
+
+ def findAndStartJob(self):
+ """Find a job to run and send it to the buildd slave.
+
+ :return: A Deferred whose value is the `IBuildQueue` instance
+ found or None if no job was found.
+ """
+ # XXX This method should be removed in favour of two separately
+ # called methods that find and dispatch the job. It will
+ # require a lot of test fixing.
+ logger = self._getSlaveScannerLogger()
+ candidate = self.builder.acquireBuildCandidate()
+
+ if candidate is None:
+ logger.debug("No build candidates available for builder.")
+ return defer.succeed(None)
+
+ d = self._dispatchBuildCandidate(candidate)
+ return d.addCallback(lambda ignored: candidate)
+
+ def updateBuild(self, queueItem):
+ """Verify the current build job status.
+
+ Perform the required actions for each state.
+
+ :return: A Deferred that fires when the slave dialog is finished.
+ """
+ return self.current_build_behavior.updateBuild(queueItem)
+
+ def transferSlaveFileToLibrarian(self, file_sha1, filename, private):
+ """Transfer a file from the slave to the librarian.
+
+ :param file_sha1: The file's sha1, which is how the file is addressed
+ in the slave XMLRPC protocol. Specifically, the file_sha1 'buildlog'
+ will cause the build log to be retrieved and gzipped.
+ :param filename: The name of the file to be given to the librarian
+ file alias.
+ :param private: True if the build is for a private archive.
+ :return: A Deferred that calls back with a librarian file alias.
+ """
+ out_file_fd, out_file_name = tempfile.mkstemp(suffix=".buildlog")
+ out_file = os.fdopen(out_file_fd, "r+")
+
+ def got_file(ignored, filename, out_file, out_file_name):
+ try:
+ # If the requested file is the 'buildlog' compress it
+ # using gzip before storing in Librarian.
+ if file_sha1 == 'buildlog':
+ out_file = open(out_file_name)
+ filename += '.gz'
+ out_file_name += '.gz'
+ gz_file = gzip.GzipFile(out_file_name, mode='wb')
+ copy_and_close(out_file, gz_file)
+ os.remove(out_file_name.replace('.gz', ''))
+
+ # Reopen the file, seek to its end position, count and seek
+ # to beginning, ready for adding to the Librarian.
+ out_file = open(out_file_name)
+ out_file.seek(0, 2)
+ bytes_written = out_file.tell()
+ out_file.seek(0)
+
+ library_file = getUtility(ILibraryFileAliasSet).create(
+ filename, bytes_written, out_file,
+ contentType=filenameToContentType(filename),
+ restricted=private)
+ finally:
+ # Remove the temporary file. getFile() closes the file
+ # object.
+ os.remove(out_file_name)
+
+ return library_file.id
+
+ d = self.slave.getFile(file_sha1, out_file)
+ d.addCallback(got_file, filename, out_file, out_file_name)
+ return d
+
+ def _getSlaveScannerLogger(self):
+ """Return the logger instance from buildd-slave-scanner.py."""
+ # XXX cprov 20071120: Ideally the Launchpad logging system
+ # should be able to configure the root-logger instead of creating
+ # a new object, then the logger lookups won't require the specific
+ # name argument anymore. See bug 164203.
+ logger = logging.getLogger('slave-scanner')
+ return logger
class Builder(SQLBase):
@@ -436,7 +791,8 @@
# ...we'll set it based on our current job.
self._current_build_behavior = (
currentjob.required_build_behavior)
- self._current_build_behavior.setBuilder(self)
+ self._current_build_behavior.setBuilderInteractor(
+ BuilderInteractor(self))
return self._current_build_behavior
elif self._current_build_behavior is None:
# If we don't have a current job or an idle behavior
@@ -454,7 +810,8 @@
"""Set the current build behavior."""
self._current_build_behavior = new_behavior
if self._current_build_behavior is not None:
- self._current_build_behavior.setBuilder(self)
+ self._current_build_behavior.setBuilderInteractor(
+ BuilderInteractor(self))
current_build_behavior = property(
_getCurrentBuildBehavior, _setCurrentBuildBehavior)
@@ -479,18 +836,6 @@
self.failure_count = 0
self._clean_currentjob_cache()
- def rescueIfLost(self, logger=None):
- """See `IBuilder`."""
- return rescueBuilderIfLost(self, logger)
-
- def updateStatus(self, logger=None):
- """See `IBuilder`."""
- return updateBuilderStatus(self, logger)
-
- def cleanSlave(self):
- """See IBuilder."""
- return self.slave.clean()
-
@cachedproperty
def currentjob(self):
"""See IBuilder"""
@@ -499,84 +844,6 @@
def _clean_currentjob_cache(self):
del get_property_cache(self).currentjob
- def requestAbort(self):
- """See IBuilder."""
- return self.slave.abort()
-
- def resumeSlaveHost(self):
- """See IBuilder."""
- if not self.virtualized:
- return defer.fail(CannotResumeHost('Builder is not virtualized.'))
-
- if not self.vm_host:
- return defer.fail(CannotResumeHost('Undefined vm_host.'))
-
- logger = self._getSlaveScannerLogger()
- logger.info("Resuming %s (%s)" % (self.name, self.url))
-
- d = self.slave.resume()
-
- def got_resume_ok((stdout, stderr, returncode)):
- return stdout, stderr
-
- def got_resume_bad(failure):
- stdout, stderr, code = failure.value
- raise CannotResumeHost(
- "Resuming failed:\nOUT:\n%s\nERR:\n%s\n" % (stdout, stderr))
-
- return d.addCallback(got_resume_ok).addErrback(got_resume_bad)
-
- @cachedproperty
- def slave(self):
- """See IBuilder."""
- if self.virtualized:
- timeout = config.builddmaster.virtualized_socket_timeout
- else:
- timeout = config.builddmaster.socket_timeout
- return BuilderSlave.makeBuilderSlave(self.url, self.vm_host, timeout)
-
- def startBuild(self, build_queue_item, logger):
- """See IBuilder."""
- self.current_build_behavior = build_queue_item.required_build_behavior
- self.current_build_behavior.logStartBuild(logger)
-
- # Make sure the request is valid; an exception is raised if it's not.
- self.current_build_behavior.verifyBuildRequest(logger)
-
- # Set the build behavior depending on the provided build queue item.
- if not self.builderok:
- raise BuildDaemonError(
- "Attempted to start a build on a known-bad builder.")
-
- # If we are building a virtual build, resume the virtual machine.
- if self.virtualized:
- d = self.resumeSlaveHost()
- else:
- d = defer.succeed(None)
-
- def ping_done(ignored):
- return self.current_build_behavior.dispatchBuildToSlave(
- build_queue_item.id, logger)
-
- def resume_done(ignored):
- # Before we try and contact the resumed slave, we're going
- # to send it a message. This is to ensure it's accepting
- # packets from the outside world, because testing has shown
- # that the first packet will randomly fail for no apparent
- # reason. This could be a quirk of the Xen guest, we're not
- # sure. We also don't care about the result from this message,
- # just that it's sent, hence the "addBoth".
- # See bug 586359.
- if self.virtualized:
- d = self.slave.echo("ping")
- else:
- d = defer.succeed(None)
- d.addBoth(ping_done)
- return d
-
- d.addCallback(resume_done)
- return d
-
def failBuilder(self, reason):
"""See IBuilder"""
# XXX cprov 2007-04-17: ideally we should be able to notify the
@@ -601,94 +868,6 @@
return getUtility(IBuildFarmJobSet).getBuildsForBuilder(
self, status=build_state, user=user)
- def slaveStatus(self):
- """See IBuilder."""
- d = self.slave.status()
-
- def got_status(status_sentence):
- status = {'builder_status': status_sentence[0]}
-
- # Extract detailed status and log information if present.
- # Although build_id is also easily extractable here, there is no
- # valid reason for anything to use it, so we exclude it.
- if status['builder_status'] == 'BuilderStatus.WAITING':
- status['build_status'] = status_sentence[1]
- else:
- if status['builder_status'] == 'BuilderStatus.BUILDING':
- status['logtail'] = status_sentence[2]
-
- self.current_build_behavior.updateSlaveStatus(
- status_sentence, status)
- return status
-
- return d.addCallback(got_status)
-
- def slaveStatusSentence(self):
- """See IBuilder."""
- return self.slave.status()
-
- def verifySlaveBuildCookie(self, slave_build_id):
- """See `IBuilder`."""
- return self.current_build_behavior.verifySlaveBuildCookie(
- slave_build_id)
-
- def updateBuild(self, queueItem):
- """See `IBuilder`."""
- return self.current_build_behavior.updateBuild(queueItem)
-
- def transferSlaveFileToLibrarian(self, file_sha1, filename, private):
- """See IBuilder."""
- out_file_fd, out_file_name = tempfile.mkstemp(suffix=".buildlog")
- out_file = os.fdopen(out_file_fd, "r+")
-
- def got_file(ignored, filename, out_file, out_file_name):
- try:
- # If the requested file is the 'buildlog' compress it
- # using gzip before storing in Librarian.
- if file_sha1 == 'buildlog':
- out_file = open(out_file_name)
- filename += '.gz'
- out_file_name += '.gz'
- gz_file = gzip.GzipFile(out_file_name, mode='wb')
- copy_and_close(out_file, gz_file)
- os.remove(out_file_name.replace('.gz', ''))
-
- # Reopen the file, seek to its end position, count and seek
- # to beginning, ready for adding to the Librarian.
- out_file = open(out_file_name)
- out_file.seek(0, 2)
- bytes_written = out_file.tell()
- out_file.seek(0)
-
- library_file = getUtility(ILibraryFileAliasSet).create(
- filename, bytes_written, out_file,
- contentType=filenameToContentType(filename),
- restricted=private)
- finally:
- # Remove the temporary file. getFile() closes the file
- # object.
- os.remove(out_file_name)
-
- return library_file.id
-
- d = self.slave.getFile(file_sha1, out_file)
- d.addCallback(got_file, filename, out_file, out_file_name)
- return d
-
- def isAvailable(self):
- """See `IBuilder`."""
- if not self.builderok:
- return defer.succeed(False)
- d = self.slaveStatusSentence()
-
- def catch_fault(failure):
- failure.trap(xmlrpclib.Fault, socket.error)
- return False
-
- def check_available(status):
- return status[0] == 'BuilderStatus.IDLE'
- return d.addCallbacks(check_available, catch_fault)
-
def _getSlaveScannerLogger(self):
"""Return the logger instance from buildd-slave-scanner.py."""
# XXX cprov 20071120: Ideally the Launchpad logging system
@@ -699,20 +878,7 @@
return logger
def acquireBuildCandidate(self):
- """Acquire a build candidate in an atomic fashion.
-
- When retrieiving a candidate we need to mark it as building
- immediately so that it is not dispatched by another builder in the
- build manager.
-
- We can consider this to be atomic because although the build manager
- is a Twisted app and gives the appearance of doing lots of things at
- once, it's still single-threaded so no more than one builder scan
- can be in this code at the same time.
-
- If there's ever more than one build manager running at once, then
- this code will need some sort of mutex.
- """
+ """See `IBuilder`."""
candidate = self._findBuildCandidate()
if candidate is not None:
candidate.markAsBuilding(self)
@@ -789,20 +955,6 @@
return None
- def _dispatchBuildCandidate(self, candidate):
- """Dispatch the pending job to the associated buildd slave.
-
- This method can only be executed in the builddmaster machine, since
- it will actually issues the XMLRPC call to the buildd-slave.
-
- :param candidate: The job to dispatch.
- """
- logger = self._getSlaveScannerLogger()
- # Using maybeDeferred ensures that any exceptions are also
- # wrapped up and caught later.
- d = defer.maybeDeferred(self.startBuild, candidate, logger)
- return d
-
def handleFailure(self, logger):
"""See IBuilder."""
self.gotFailure()
@@ -818,42 +970,6 @@
"Builder %s failure count: %s" % (
self.name, self.failure_count))
- def resetOrFail(self, logger, exception):
- """See IBuilder."""
- error_message = str(exception)
- if self.virtualized:
- # Virtualized/PPA builder: attempt a reset, unless the failure
- # was itself a failure to reset. (In that case, the slave
- # scanner will try again until we reach the failure threshold.)
- if not isinstance(exception, CannotResumeHost):
- logger.warn(
- "Resetting builder: %s -- %s" % (self.url, error_message),
- exc_info=True)
- return self.resumeSlaveHost()
- else:
- # XXX: This should really let the failure bubble up to the
- # scan() method that does the failure counting.
- # Mark builder as 'failed'.
- logger.warn(
- "Disabling builder: %s -- %s" % (self.url, error_message))
- self.failBuilder(error_message)
- return defer.succeed(None)
-
- def findAndStartJob(self):
- """See IBuilder."""
- # XXX This method should be removed in favour of two separately
- # called methods that find and dispatch the job. It will
- # require a lot of test fixing.
- logger = self._getSlaveScannerLogger()
- candidate = self.acquireBuildCandidate()
-
- if candidate is None:
- logger.debug("No build candidates available for builder.")
- return defer.succeed(None)
-
- d = self._dispatchBuildCandidate(candidate)
- return d.addCallback(lambda ignored: candidate)
-
def getBuildQueue(self):
"""See `IBuilder`."""
# Return a single BuildQueue for the builder provided it's
=== modified file 'lib/lp/buildmaster/model/buildfarmjobbehavior.py'
--- lib/lp/buildmaster/model/buildfarmjobbehavior.py 2013-08-06 15:28:14 +0000
+++ lib/lp/buildmaster/model/buildfarmjobbehavior.py 2013-08-22 01:38:28 +0000
@@ -58,9 +58,10 @@
def build(self):
return self.buildfarmjob.build
- def setBuilder(self, builder):
+ def setBuilderInteractor(self, interactor):
"""The builder should be set once and not changed."""
- self._builder = builder
+ self._interactor = interactor
+ self._builder = interactor.builder
def verifyBuildRequest(self, logger):
"""The default behavior is a no-op."""
@@ -89,19 +90,18 @@
timestamp = now.strftime("%Y%m%d-%H%M%S")
return '%s-%s' % (timestamp, build_cookie)
- @staticmethod
- def getLogFromSlave(build, queue_item):
+ def getLogFromSlave(self, queue_item):
"""See `IPackageBuild`."""
- d = queue_item.builder.transferSlaveFileToLibrarian(
+ d = self._interactor.transferSlaveFileToLibrarian(
SLAVE_LOG_FILENAME, queue_item.getLogFileName(),
- build.is_private)
+ self.build.is_private)
return d
@defer.inlineCallbacks
def storeLogFromSlave(self, build_queue=None):
"""See `IBuildFarmJob`."""
lfa_id = yield self.getLogFromSlave(
- self.build, build_queue or self.build.buildqueue_record)
+ build_queue or self.build.buildqueue_record)
self.build.setLog(lfa_id)
transaction.commit()
@@ -109,7 +109,7 @@
"""See `IBuildFarmJobBehavior`."""
logger = logging.getLogger('slave-scanner')
- d = self._builder.slaveStatus()
+ d = self._interactor.slaveStatus()
def got_failure(failure):
failure.trap(xmlrpclib.Fault, socket.error)
@@ -189,7 +189,7 @@
Clean the builder for another jobs.
"""
- d = queueItem.builder.cleanSlave()
+ d = self._interactor.cleanSlave()
def got_cleaned(ignored):
queueItem.builder = None
@@ -276,7 +276,7 @@
if build.build_farm_job_type == BuildFarmJobType.PACKAGEBUILD:
build = build.buildqueue_record.specific_job.build
if not build.current_source_publication:
- yield self.build.buildqueue_record.builder.cleanSlave()
+ yield self._interactor.cleanSlave()
build.updateStatus(BuildStatus.SUPERSEDED)
self.build.buildqueue_record.destroySelf()
return
@@ -305,7 +305,6 @@
grab_dir, str(build.archive.id), build.distribution.name)
os.makedirs(upload_path)
- slave = removeSecurityProxy(build.buildqueue_record.builder.slave)
successful_copy_from_slave = True
filenames_to_download = {}
for filename in filemap:
@@ -321,7 +320,7 @@
"for the build %d." % (filename, build.id))
break
filenames_to_download[filemap[filename]] = out_file_name
- yield slave.getFiles(filenames_to_download)
+ yield self._interactor.slave.getFiles(filenames_to_download)
status = (
BuildStatus.UPLOADING if successful_copy_from_slave
@@ -353,7 +352,7 @@
if not os.path.exists(target_dir):
os.mkdir(target_dir)
- yield self.build.buildqueue_record.builder.cleanSlave()
+ yield self._interactor.cleanSlave()
self.build.buildqueue_record.destroySelf()
transaction.commit()
@@ -379,7 +378,7 @@
yield self.storeLogFromSlave()
if send_notification:
self.build.notify()
- yield self.build.buildqueue_record.builder.cleanSlave()
+ yield self._interactor.cleanSlave()
self.build.buildqueue_record.destroySelf()
transaction.commit()
@@ -423,17 +422,17 @@
If the build was explicitly cancelled, then mark it as such.
Otherwise, the build has failed in some unexpected way.
"""
- builder = self.build.buildqueue_record.builder
if self.build.status == BuildStatus.CANCELLING:
self.build.buildqueue_record.cancel()
transaction.commit()
- yield builder.cleanSlave()
+ yield self._interactor.cleanSlave()
else:
self.build.buildqueue_record.reset()
try:
- builder.handleFailure(logger)
- yield builder.resetOrFail(logger, BuildSlaveFailure(
- "Builder unexpectedly returned ABORTED"))
+ self._builder.handleFailure(logger)
+ yield self._interactor.resetOrFail(
+ logger,
+ BuildSlaveFailure("Builder unexpectedly returned ABORTED"))
except Exception as e:
# We've already done our best to mark the builder as failed.
logger.error("Failed to rescue from ABORTED: %s" % e)
@@ -448,7 +447,7 @@
later, the build records is delayed by reducing the lastscore to
ZERO.
"""
- yield self.build.buildqueue_record.builder.cleanSlave()
+ yield self._interactor.cleanSlave()
self.build.buildqueue_record.reset()
transaction.commit()
=== modified file 'lib/lp/buildmaster/tests/mock_slaves.py'
--- lib/lp/buildmaster/tests/mock_slaves.py 2012-10-01 05:18:25 +0000
+++ lib/lp/buildmaster/tests/mock_slaves.py 2013-08-22 01:38:28 +0000
@@ -36,15 +36,9 @@
CannotFetchFile,
CorruptBuildCookie,
)
-from lp.buildmaster.model.builder import (
- BuilderSlave,
- rescueBuilderIfLost,
- updateBuilderStatus,
- )
+from lp.buildmaster.model.builder import BuilderSlave
+from lp.buildmaster.model.buildfarmjobbehavior import IdleBuildBehavior
from lp.services.config import config
-from lp.soyuz.model.binarypackagebuildbehavior import (
- BinaryPackageBuildBehavior,
- )
from lp.testing.sampledata import I386_ARCHITECTURE_NAME
@@ -58,49 +52,22 @@
class MockBuilder:
"""Emulates a IBuilder class."""
- def __init__(self, name, slave, behavior=None):
- if behavior is None:
- self.current_build_behavior = BinaryPackageBuildBehavior(None)
- else:
- self.current_build_behavior = behavior
-
- self.slave = slave
- self.builderok = True
- self.manual = False
+ def __init__(self, name='mock-builder', behavior=None, builderok=True,
+ manual=False, virtualized=True, vm_host=None):
+ self.current_build_behavior = behavior or IdleBuildBehavior()
+ self.currentjob = None
+ self.builderok = builderok
+ self.manual = manual
self.url = 'http://fake:0000'
- slave.url = self.url
self.name = name
- self.virtualized = True
+ self.virtualized = virtualized
+ self.vm_host = vm_host
+ self.failnotes = None
def failBuilder(self, reason):
self.builderok = False
self.failnotes = reason
- def slaveStatusSentence(self):
- return self.slave.status()
-
- def verifySlaveBuildCookie(self, slave_build_id):
- return self.current_build_behavior.verifySlaveBuildCookie(
- slave_build_id)
-
- def cleanSlave(self):
- return self.slave.clean()
-
- def requestAbort(self):
- return self.slave.abort()
-
- def resumeSlave(self, logger):
- return ('out', 'err')
-
- def checkSlaveAlive(self):
- pass
-
- def rescueIfLost(self, logger=None):
- return rescueBuilderIfLost(self, logger)
-
- def updateStatus(self, logger=None):
- return defer.maybeDeferred(updateBuilderStatus, self, logger)
-
# XXX: It would be *really* nice to run some set of tests against the real
# BuilderSlave and this one to prevent interface skew.
=== modified file 'lib/lp/buildmaster/tests/test_builder.py'
--- lib/lp/buildmaster/tests/test_builder.py 2013-08-06 15:28:14 +0000
+++ lib/lp/buildmaster/tests/test_builder.py 2013-08-22 01:38:28 +0000
@@ -40,6 +40,7 @@
)
from lp.buildmaster.interfaces.buildqueue import IBuildQueueSet
from lp.buildmaster.model.builder import (
+ BuilderInteractor,
BuilderSlave,
ProxyWithConnectionTimeout,
)
@@ -140,74 +141,12 @@
super(TestBuilder, self).setUp()
self.slave_helper = self.useFixture(SlaveTestHelpers())
- def test_updateStatus_aborts_lost_and_broken_slave(self):
- # A slave that's 'lost' should be aborted; when the slave is
- # broken then abort() should also throw a fault.
- slave = LostBuildingBrokenSlave()
- lostbuilding_builder = MockBuilder(
- 'Lost Building Broken Slave', slave, behavior=CorruptBehavior())
- d = lostbuilding_builder.updateStatus(BufferLogger())
-
- def check_slave_status(failure):
- self.assertIn('abort', slave.call_log)
- # 'Fault' comes from the LostBuildingBrokenSlave, this is
- # just testing that the value is passed through.
- self.assertIsInstance(failure.value, xmlrpclib.Fault)
-
- return d.addBoth(check_slave_status)
-
- def test_resumeSlaveHost_nonvirtual(self):
- builder = self.factory.makeBuilder(virtualized=False)
- d = builder.resumeSlaveHost()
- return assert_fails_with(d, CannotResumeHost)
-
- def test_resumeSlaveHost_no_vmhost(self):
- builder = self.factory.makeBuilder(virtualized=True, vm_host=None)
- d = builder.resumeSlaveHost()
- return assert_fails_with(d, CannotResumeHost)
-
- def test_resumeSlaveHost_success(self):
- reset_config = """
- [builddmaster]
- vm_resume_command: /bin/echo -n parp"""
- config.push('reset', reset_config)
- self.addCleanup(config.pop, 'reset')
-
- builder = self.factory.makeBuilder(virtualized=True, vm_host="pop")
- d = builder.resumeSlaveHost()
-
- def got_resume(output):
- self.assertEqual(('parp', ''), output)
-
- return d.addCallback(got_resume)
-
- def test_resumeSlaveHost_command_failed(self):
- reset_fail_config = """
- [builddmaster]
- vm_resume_command: /bin/false"""
- config.push('reset fail', reset_fail_config)
- self.addCleanup(config.pop, 'reset fail')
- builder = self.factory.makeBuilder(virtualized=True, vm_host="pop")
- d = builder.resumeSlaveHost()
- return assert_fails_with(d, CannotResumeHost)
-
def test_handleFailure_increments_failure_count(self):
builder = self.factory.makeBuilder(virtualized=False)
builder.builderok = True
builder.handleFailure(BufferLogger())
self.assertEqual(1, builder.failure_count)
- def test_resetOrFail_resume_failure(self):
- reset_fail_config = """
- [builddmaster]
- vm_resume_command: /bin/false"""
- config.push('reset fail', reset_fail_config)
- self.addCleanup(config.pop, 'reset fail')
- builder = self.factory.makeBuilder(virtualized=True, vm_host="pop")
- builder.builderok = True
- d = builder.resetOrFail(BufferLogger(), Exception())
- return assert_fails_with(d, CannotResumeHost)
-
def _setupBuilder(self):
processor = self.factory.makeProcessor(name="i386")
builder = self.factory.makeBuilder(
@@ -247,7 +186,7 @@
# findAndStartJob delegates to it.
removeSecurityProxy(builder)._findBuildCandidate = FakeMethod(
result=candidate)
- d = builder.findAndStartJob()
+ d = BuilderInteractor(builder).findAndStartJob()
return d.addCallback(self.assertEqual, candidate)
def test_findAndStartJob_starts_job(self):
@@ -258,7 +197,7 @@
candidate = build.queueBuild()
removeSecurityProxy(builder)._findBuildCandidate = FakeMethod(
result=candidate)
- d = builder.findAndStartJob()
+ d = BuilderInteractor(builder).findAndStartJob()
def check_build_started(candidate):
self.assertEqual(candidate.builder, builder)
@@ -273,26 +212,123 @@
candidate = build.queueBuild()
removeSecurityProxy(builder)._findBuildCandidate = FakeMethod(
result=candidate)
- d = builder.findAndStartJob()
+ interactor = BuilderInteractor(builder)
+ d = interactor.findAndStartJob()
def check_build_started(candidate):
self.assertIn(
- ('echo', 'ping'), removeSecurityProxy(builder.slave).call_log)
+ ('echo', 'ping'), interactor.slave.call_log)
return d.addCallback(check_build_started)
+ def test_recover_building_slave_with_job_that_finished_elsewhere(self):
+ # See bug 671242
+ # When a job is destroyed, the builder's behaviour should be reset
+ # too so that we don't traceback when the wrong behaviour tries
+ # to access a non-existent job.
+ builder, build = self._setupBinaryBuildAndBuilder()
+ candidate = build.queueBuild()
+ building_slave = BuildingSlave()
+ self.patch(
+ BuilderSlave, 'makeBuilderSlave', FakeMethod(building_slave))
+ candidate.markAsBuilding(builder)
+
+ # At this point we should see a valid behaviour on the builder:
+ self.assertFalse(
+ zope_isinstance(
+ builder.current_build_behavior, IdleBuildBehavior))
+
+ # Now reset the job and try to rescue the builder.
+ candidate.destroySelf()
+ self.layer.txn.commit()
+ builder = getUtility(IBuilderSet)[builder.name]
+ d = BuilderInteractor(builder).rescueIfLost()
+
+ def check_builder(ignored):
+ self.assertIsInstance(
+ removeSecurityProxy(builder.current_build_behavior),
+ IdleBuildBehavior)
+
+ return d.addCallback(check_builder)
+
+
+class TestBuilderInteractor(TestCase):
+
+ run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=10)
+
+ def test_updateStatus_aborts_lost_and_broken_slave(self):
+ # A slave that's 'lost' should be aborted; when the slave is
+ # broken then abort() should also throw a fault.
+ slave = LostBuildingBrokenSlave()
+ lostbuilding_builder = MockBuilder(behavior=CorruptBehavior())
+ d = BuilderInteractor(lostbuilding_builder, slave).updateStatus(
+ BufferLogger())
+
+ def check_slave_status(failure):
+ self.assertIn('abort', slave.call_log)
+ # 'Fault' comes from the LostBuildingBrokenSlave, this is
+ # just testing that the value is passed through.
+ self.assertIsInstance(failure.value, xmlrpclib.Fault)
+
+ return d.addBoth(check_slave_status)
+
+ def test_resumeSlaveHost_nonvirtual(self):
+ builder = MockBuilder(virtualized=False)
+ d = BuilderInteractor(builder).resumeSlaveHost()
+ return assert_fails_with(d, CannotResumeHost)
+
+ def test_resumeSlaveHost_no_vmhost(self):
+ builder = MockBuilder(virtualized=True, vm_host=None)
+ d = BuilderInteractor(builder).resumeSlaveHost()
+ return assert_fails_with(d, CannotResumeHost)
+
+ def test_resumeSlaveHost_success(self):
+ reset_config = """
+ [builddmaster]
+ vm_resume_command: /bin/echo -n parp"""
+ config.push('reset', reset_config)
+ self.addCleanup(config.pop, 'reset')
+
+ builder = MockBuilder(virtualized=True, vm_host="pop")
+ d = BuilderInteractor(builder).resumeSlaveHost()
+
+ def got_resume(output):
+ self.assertEqual(('parp', ''), output)
+
+ return d.addCallback(got_resume)
+
+ def test_resumeSlaveHost_command_failed(self):
+ reset_fail_config = """
+ [builddmaster]
+ vm_resume_command: /bin/false"""
+ config.push('reset fail', reset_fail_config)
+ self.addCleanup(config.pop, 'reset fail')
+ builder = MockBuilder(virtualized=True, vm_host="pop")
+ d = BuilderInteractor(builder).resumeSlaveHost()
+ return assert_fails_with(d, CannotResumeHost)
+
+ def test_resetOrFail_resume_failure(self):
+ reset_fail_config = """
+ [builddmaster]
+ vm_resume_command: /bin/false"""
+ config.push('reset fail', reset_fail_config)
+ self.addCleanup(config.pop, 'reset fail')
+ builder = MockBuilder(virtualized=True, vm_host="pop", builderok=True)
+ d = BuilderInteractor(builder).resetOrFail(BufferLogger(), Exception())
+ return assert_fails_with(d, CannotResumeHost)
+
def test_slave(self):
# Builder.slave is a BuilderSlave that points at the actual Builder.
# The Builder is only ever used in scripts that run outside of the
# security context.
- builder = removeSecurityProxy(
- self.factory.makeBuilder(virtualized=False))
- self.assertEqual(builder.url, builder.slave.url)
- self.assertEqual(10, builder.slave.timeout)
+ builder = MockBuilder(virtualized=False)
+ interactor = BuilderInteractor(builder)
+ self.assertEqual(builder.url, interactor.slave.url)
+ self.assertEqual(10, interactor.slave.timeout)
- builder = removeSecurityProxy(
- self.factory.makeBuilder(virtualized=True))
- self.assertEqual(5, builder.slave.timeout)
+ builder = MockBuilder(virtualized=True)
+ interactor = BuilderInteractor(builder)
+ self.assertEqual(5, interactor.slave.timeout)
def test_recovery_of_aborted_virtual_slave(self):
# If a virtual_slave is in the ABORTED state,
@@ -300,9 +336,7 @@
# currently building anything.
# See bug 463046.
aborted_slave = AbortedSlave()
- builder = MockBuilder("mock_builder", aborted_slave)
- builder.currentjob = None
- d = builder.rescueIfLost()
+ d = BuilderInteractor(MockBuilder(), aborted_slave).rescueIfLost()
def check_slave_calls(ignored):
self.assertIn('clean', aborted_slave.call_log)
@@ -314,11 +348,8 @@
# cleaned since the sbuild process doesn't properly kill the
# build job. We test that the builder is marked failed.
aborted_slave = AbortedSlave()
- builder = MockBuilder("mock_builder", aborted_slave)
- builder.currentjob = None
- builder.virtualized = False
- builder.builderok = True
- d = builder.rescueIfLost()
+ builder = MockBuilder(virtualized=False, builderok=True)
+ d = BuilderInteractor(builder, aborted_slave).rescueIfLost()
def check_failed(ignored):
self.assertFalse(builder.builderok)
@@ -329,8 +360,8 @@
def test_recover_ok_slave(self):
# An idle slave is not rescued.
slave = OkSlave()
- builder = MockBuilder("mock_builder", slave, TrivialBehavior())
- d = builder.rescueIfLost()
+ builder = MockBuilder(behavior=TrivialBehavior())
+ d = BuilderInteractor(builder, slave).rescueIfLost()
def check_slave_calls(ignored):
self.assertNotIn('abort', slave.call_log)
@@ -342,8 +373,8 @@
# rescueIfLost does not attempt to abort or clean a builder that is
# WAITING.
waiting_slave = WaitingSlave()
- builder = MockBuilder("mock_builder", waiting_slave, TrivialBehavior())
- d = builder.rescueIfLost()
+ builder = MockBuilder(behavior=TrivialBehavior())
+ d = BuilderInteractor(builder, waiting_slave).rescueIfLost()
def check_slave_calls(ignored):
self.assertNotIn('abort', waiting_slave.call_log)
@@ -358,8 +389,8 @@
# builder is reset for a new build, and the corrupt build is
# discarded.
waiting_slave = WaitingSlave()
- builder = MockBuilder("mock_builder", waiting_slave, CorruptBehavior())
- d = builder.rescueIfLost()
+ builder = MockBuilder(behavior=CorruptBehavior())
+ d = BuilderInteractor(builder, waiting_slave).rescueIfLost()
def check_slave_calls(ignored):
self.assertNotIn('abort', waiting_slave.call_log)
@@ -371,9 +402,8 @@
# rescueIfLost does not attempt to abort or clean a builder that is
# BUILDING.
building_slave = BuildingSlave()
- builder = MockBuilder(
- "mock_builder", building_slave, TrivialBehavior())
- d = builder.rescueIfLost()
+ builder = MockBuilder(behavior=TrivialBehavior())
+ d = BuilderInteractor(builder, building_slave).rescueIfLost()
def check_slave_calls(ignored):
self.assertNotIn('abort', building_slave.call_log)
@@ -385,9 +415,8 @@
# If a slave is BUILDING with a build id we don't recognize, then we
# abort the build, thus stopping it in its tracks.
building_slave = BuildingSlave()
- builder = MockBuilder(
- "mock_builder", building_slave, CorruptBehavior())
- d = builder.rescueIfLost()
+ builder = MockBuilder(behavior=CorruptBehavior())
+ d = BuilderInteractor(builder, building_slave).rescueIfLost()
def check_slave_calls(ignored):
self.assertIn('abort', building_slave.call_log)
@@ -395,54 +424,17 @@
return d.addCallback(check_slave_calls)
- def test_recover_building_slave_with_job_that_finished_elsewhere(self):
- # See bug 671242
- # When a job is destroyed, the builder's behaviour should be reset
- # too so that we don't traceback when the wrong behaviour tries
- # to access a non-existent job.
- builder, build = self._setupBinaryBuildAndBuilder()
- candidate = build.queueBuild()
- building_slave = BuildingSlave()
- self.patch(
- BuilderSlave, 'makeBuilderSlave', FakeMethod(building_slave))
- candidate.markAsBuilding(builder)
-
- # At this point we should see a valid behaviour on the builder:
- self.assertFalse(
- zope_isinstance(
- builder.current_build_behavior, IdleBuildBehavior))
-
- # Now reset the job and try to rescue the builder.
- candidate.destroySelf()
- self.layer.txn.commit()
- builder = getUtility(IBuilderSet)[builder.name]
- d = builder.rescueIfLost()
-
- def check_builder(ignored):
- self.assertIsInstance(
- removeSecurityProxy(builder.current_build_behavior),
- IdleBuildBehavior)
-
- return d.addCallback(check_builder)
-
-
-class TestBuilderSlaveStatus(TestCaseWithFactory):
- # Verify what IBuilder.slaveStatus returns with slaves in different
- # states.
-
- layer = LaunchpadZopelessLayer
+
+class TestBuilderInteractorSlaveStatus(TestCase):
+ # Verify what BuilderInteractor.slaveStatus returns with slaves in
+ # different states.
+
run_tests_with = AsynchronousDeferredRunTest
- def setUp(self):
- super(TestBuilderSlaveStatus, self).setUp()
- self.slave_helper = self.useFixture(SlaveTestHelpers())
-
def assertStatus(self, slave, builder_status=None,
build_status=None, logtail=False, filemap=None,
dependencies=None):
- builder = self.factory.makeBuilder()
- self.patch(BuilderSlave, 'makeBuilderSlave', FakeMethod(slave))
- d = builder.slaveStatus()
+ d = BuilderInteractor(MockBuilder(), slave).slaveStatus()
def got_status(status_dict):
expected = {}
@@ -487,23 +479,17 @@
def test_isAvailable_with_not_builderok(self):
# isAvailable() is a wrapper around slaveStatusSentence()
- builder = self.factory.makeBuilder()
+ builder = MockBuilder()
builder.builderok = False
- d = builder.isAvailable()
+ d = BuilderInteractor(builder).isAvailable()
return d.addCallback(self.assertFalse)
def test_isAvailable_with_slave_fault(self):
- builder = self.factory.makeBuilder()
- self.patch(
- BuilderSlave, 'makeBuilderSlave', FakeMethod(BrokenSlave()))
- d = builder.isAvailable()
+ d = BuilderInteractor(MockBuilder(), BrokenSlave()).isAvailable()
return d.addCallback(self.assertFalse)
def test_isAvailable_with_slave_idle(self):
- builder = self.factory.makeBuilder()
- self.patch(
- BuilderSlave, 'makeBuilderSlave', FakeMethod(OkSlave()))
- d = builder.isAvailable()
+ d = BuilderInteractor(MockBuilder(), OkSlave()).isAvailable()
return d.addCallback(self.assertTrue)
=== modified file 'lib/lp/buildmaster/tests/test_manager.py'
--- lib/lp/buildmaster/tests/test_manager.py 2013-07-27 12:02:13 +0000
+++ lib/lp/buildmaster/tests/test_manager.py 2013-08-22 01:38:28 +0000
@@ -35,6 +35,7 @@
)
from lp.buildmaster.model.builder import (
Builder,
+ BuilderInteractor,
BuilderSlave,
)
from lp.buildmaster.tests.harness import BuilddManagerTestSetup
@@ -454,6 +455,7 @@
self.builder.virtualized = True
self.scanner = SlaveScanner(builder_name, BufferLogger())
self.scanner.builder = self.builder
+ self.scanner.interactor = BuilderInteractor(self.builder)
self.scanner.logger.name = 'slave-scanner'
def test_ignores_nonvirtual(self):
=== modified file 'lib/lp/code/model/recipebuilder.py'
--- lib/lp/code/model/recipebuilder.py 2013-01-09 07:58:55 +0000
+++ lib/lp/code/model/recipebuilder.py 2013-08-22 01:38:28 +0000
@@ -137,7 +137,7 @@
distroarchseries.displayname)
logger.info(
"Sending chroot file for recipe build to %s" % self._builder.name)
- d = self._builder.slave.cacheFile(logger, chroot)
+ d = self._interactor.slave.cacheFile(logger, chroot)
def got_cache_file(ignored):
# Generate a string which can be used to cross-check when
@@ -149,7 +149,7 @@
logger.info(
"Initiating build %s on %s" % (buildid, self._builder.url))
- return self._builder.slave.build(
+ return self._interactor.slave.build(
cookie, "sourcepackagerecipe", chroot_sha1, {}, args)
def log_build_result((status, info)):
=== modified file 'lib/lp/code/model/tests/test_recipebuilder.py'
--- lib/lp/code/model/tests/test_recipebuilder.py 2013-03-12 05:51:28 +0000
+++ lib/lp/code/model/tests/test_recipebuilder.py 2013-08-22 01:38:28 +0000
@@ -28,7 +28,10 @@
from lp.buildmaster.interfaces.buildfarmjobbehavior import (
IBuildFarmJobBehavior,
)
-from lp.buildmaster.model.builder import BuilderSlave
+from lp.buildmaster.model.builder import (
+ BuilderInteractor,
+ BuilderSlave,
+ )
from lp.buildmaster.model.buildqueue import BuildQueue
from lp.buildmaster.tests.mock_slaves import (
MockBuilder,
@@ -123,8 +126,8 @@
# VerifyBuildRequest won't raise any exceptions when called with a
# valid builder set.
job = self.makeJob()
- builder = MockBuilder("bob-de-bouwer", OkSlave())
- job.setBuilder(builder)
+ builder = MockBuilder("bob-de-bouwer")
+ job.setBuilderInteractor(BuilderInteractor(builder, OkSlave()))
logger = BufferLogger()
job.verifyBuildRequest(logger)
self.assertEquals("", logger.getLogBuffer())
@@ -132,9 +135,9 @@
def test_verifyBuildRequest_non_virtual(self):
# verifyBuildRequest will raise if a non-virtual builder is proposed.
job = self.makeJob()
- builder = MockBuilder('non-virtual builder', OkSlave())
+ builder = MockBuilder('non-virtual builder')
builder.virtualized = False
- job.setBuilder(builder)
+ job.setBuilderInteractor(BuilderInteractor(builder, OkSlave()))
logger = BufferLogger()
e = self.assertRaises(AssertionError, job.verifyBuildRequest, logger)
self.assertEqual(
@@ -146,7 +149,8 @@
pocket=PackagePublishingPocket.SECURITY)
job = self.factory.makeSourcePackageRecipeBuildJob(recipe_build=build)
job = IBuildFarmJobBehavior(job.specific_job)
- job.setBuilder(MockBuilder("bob-de-bouwer", OkSlave()))
+ job.setBuilderInteractor(
+ BuilderInteractor(MockBuilder("bob-de-bouwer"), OkSlave()))
e = self.assertRaises(
AssertionError, job.verifyBuildRequest, BufferLogger())
self.assertIn('invalid pocket due to the series status of', str(e))
@@ -301,10 +305,10 @@
test_publisher = SoyuzTestPublisher()
test_publisher.addFakeChroots(job.build.distroseries)
slave = OkSlave()
- builder = MockBuilder("bob-de-bouwer", slave)
+ builder = MockBuilder("bob-de-bouwer")
processorfamily = ProcessorFamilySet().getByProcessorName('386')
builder.processor = processorfamily.processors[0]
- job.setBuilder(builder)
+ job.setBuilderInteractor(BuilderInteractor(builder, slave))
logger = BufferLogger()
d = defer.maybeDeferred(job.dispatchBuildToSlave, "someid", logger)
@@ -333,10 +337,10 @@
# available for the distroseries to build for.
job = self.makeJob()
#test_publisher = SoyuzTestPublisher()
- builder = MockBuilder("bob-de-bouwer", OkSlave())
+ builder = MockBuilder("bob-de-bouwer")
processorfamily = ProcessorFamilySet().getByProcessorName('386')
builder.processor = processorfamily.processors[0]
- job.setBuilder(builder)
+ job.setBuilderInteractor(BuilderInteractor(builder, OkSlave()))
logger = BufferLogger()
d = defer.maybeDeferred(job.dispatchBuildToSlave, "someid", logger)
return assert_fails_with(d, CannotBuild)
=== modified file 'lib/lp/soyuz/browser/tests/test_builder_views.py'
--- lib/lp/soyuz/browser/tests/test_builder_views.py 2013-01-29 04:43:50 +0000
+++ lib/lp/soyuz/browser/tests/test_builder_views.py 2013-08-22 01:38:28 +0000
@@ -13,7 +13,6 @@
)
import transaction
from zope.component import getUtility
-from zope.security.proxy import removeSecurityProxy
from lp.buildmaster.enums import (
BuildFarmJobType,
@@ -25,17 +24,13 @@
)
from lp.registry.interfaces.person import IPersonSet
from lp.services.database.sqlbase import flush_database_updates
-from lp.services.webapp.servers import LaunchpadTestRequest
from lp.soyuz.browser.build import getSpecificJobs
-from lp.soyuz.browser.builder import BuilderEditView
from lp.testing import (
celebrity_logged_in,
- login,
record_two_runs,
StormStatementRecorder,
TestCaseWithFactory,
)
-from lp.testing.fakemethod import FakeMethod
from lp.testing.layers import LaunchpadFunctionalLayer
from lp.testing.matchers import HasQueryCount
from lp.testing.sampledata import ADMIN_EMAIL
@@ -45,43 +40,6 @@
)
-class TestBuilderEditView(TestCaseWithFactory):
-
- layer = LaunchpadFunctionalLayer
-
- def setUp(self):
- super(TestBuilderEditView, self).setUp()
- # Login as an admin to ensure access to the view's context
- # object.
- login(ADMIN_EMAIL)
- self.builder = removeSecurityProxy(self.factory.makeBuilder())
-
- def initialize_view(self):
- form = {
- "field.manual": "on",
- "field.actions.update": "Change",
- }
- request = LaunchpadTestRequest(method="POST", form=form)
- view = BuilderEditView(self.builder, request)
- return view
-
- def test_posting_form_doesnt_call_slave_xmlrpc(self):
- # Posting the +edit for should not call isAvailable, which
- # would do xmlrpc to a slave builder and is explicitly forbidden
- # in a webapp process.
- view = self.initialize_view()
-
- # Stub out the slaveStatusSentence() method with a fake one that
- # records if it's been called.
- view.context.slaveStatusSentence = FakeMethod(result=[0])
-
- view.initialize()
-
- # If the dummy slaveStatusSentence() was called the call count
- # would not be zero.
- self.assertTrue(view.context.slaveStatusSentence.call_count == 0)
-
-
class TestgetSpecificJobs(TestCaseWithFactory):
layer = LaunchpadFunctionalLayer
=== modified file 'lib/lp/soyuz/model/binarypackagebuildbehavior.py'
--- lib/lp/soyuz/model/binarypackagebuildbehavior.py 2013-01-07 02:40:55 +0000
+++ lib/lp/soyuz/model/binarypackagebuildbehavior.py 2013-08-22 01:38:28 +0000
@@ -53,7 +53,7 @@
filemap[lfa.filename] = lfa.content.sha1
if not private:
dl.append(
- self._builder.slave.cacheFile(
+ self._interactor.slave.cacheFile(
logger, source_file.libraryfile))
d = defer.gatherResults(dl)
return d.addCallback(lambda ignored: filemap)
@@ -64,7 +64,7 @@
# Start the binary package build on the slave builder. First
# we send the chroot.
chroot = self.build.distro_arch_series.getChroot()
- d = self._builder.slave.cacheFile(logger, chroot)
+ d = self._interactor.slave.cacheFile(logger, chroot)
d.addCallback(self._buildFilemapStructure, logger)
def got_filemap(filemap):
@@ -78,7 +78,7 @@
"Initiating build %s on %s" % (buildid, self._builder.url))
args = self._extraBuildArgs(self.build)
- d = self._builder.slave.build(
+ d = self._interactor.slave.build(
cookie, "binarypackage", chroot_sha1, filemap, args)
def got_build((status, info)):
message = """%s (%s):
@@ -190,7 +190,7 @@
"(%s, %s)" % (
self._builder.url, file_name, url, sha1))
dl.append(
- self._builder.slave.sendFileToSlave(
+ self._interactor.slave.sendFileToSlave(
sha1, url, "buildd", archive.buildd_secret))
return dl
=== modified file 'lib/lp/soyuz/tests/test_binarypackagebuildbehavior.py'
--- lib/lp/soyuz/tests/test_binarypackagebuildbehavior.py 2013-07-27 12:02:13 +0000
+++ lib/lp/soyuz/tests/test_binarypackagebuildbehavior.py 2013-08-22 01:38:28 +0000
@@ -20,7 +20,10 @@
from lp.buildmaster.enums import BuildStatus
from lp.buildmaster.interfaces.builder import CannotBuild
-from lp.buildmaster.model.builder import BuilderSlave
+from lp.buildmaster.model.builder import (
+ BuilderInteractor,
+ BuilderSlave,
+ )
from lp.buildmaster.tests.mock_slaves import (
AbortedSlave,
AbortingSlave,
@@ -135,7 +138,7 @@
builder = removeSecurityProxy(builder)
candidate = removeSecurityProxy(candidate)
return defer.maybeDeferred(
- builder.startBuild, candidate, BufferLogger())
+ BuilderInteractor(builder).startBuild, candidate, BufferLogger())
def test_non_virtual_ppa_dispatch(self):
# When the BinaryPackageBuildBehavior dispatches PPA builds to
@@ -218,7 +221,7 @@
build.distro_arch_series.addOrUpdateChroot(lf)
candidate = build.queueBuild()
behavior = candidate.required_build_behavior
- behavior.setBuilder(builder)
+ behavior.setBuilderInteractor(BuilderInteractor(builder))
e = self.assertRaises(
AssertionError, behavior.verifyBuildRequest, BufferLogger())
expected_message = (
@@ -239,7 +242,7 @@
build.distro_arch_series.addOrUpdateChroot(lf)
candidate = build.queueBuild()
behavior = candidate.required_build_behavior
- behavior.setBuilder(builder)
+ behavior.setBuilderInteractor(BuilderInteractor(builder))
e = self.assertRaises(
AssertionError, behavior.verifyBuildRequest, BufferLogger())
self.assertEqual(
@@ -258,7 +261,7 @@
build.distro_arch_series.addOrUpdateChroot(lf)
candidate = build.queueBuild()
behavior = candidate.required_build_behavior
- behavior.setBuilder(builder)
+ behavior.setBuilderInteractor(BuilderInteractor(builder))
e = self.assertRaises(
AssertionError, behavior.verifyBuildRequest, BufferLogger())
self.assertEqual(
@@ -273,7 +276,7 @@
builder=builder, archive=archive)
candidate = build.queueBuild()
behavior = candidate.required_build_behavior
- behavior.setBuilder(builder)
+ behavior.setBuilderInteractor(BuilderInteractor(builder))
e = self.assertRaises(
CannotBuild, behavior.verifyBuildRequest, BufferLogger())
self.assertIn("Missing CHROOT", str(e))
@@ -315,6 +318,7 @@
switch_dbuser('testadmin')
self.builder = self.factory.makeBuilder()
+ self.interactor = BuilderInteractor(self.builder)
self.build = self.factory.makeBinaryPackageBuild(
builder=self.builder, pocket=PackagePublishingPocket.RELEASE)
lf = self.factory.makeLibraryFileAlias()
@@ -323,7 +327,7 @@
self.candidate = self.build.queueBuild()
self.candidate.markAsBuilding(self.builder)
self.behavior = self.candidate.required_build_behavior
- self.behavior.setBuilder(self.builder)
+ self.behavior.setBuilderInteractor(BuilderInteractor(self.builder))
# This is required so that uploaded files from the buildd don't
# hang around between test runs.
self.addCleanup(self._cleanup)
@@ -347,7 +351,7 @@
self.assertBuildProperties(self.build)
self.assertEqual(BuildStatus.FAILEDTOBUILD, self.build.status)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_depwait_collection(self):
@@ -362,7 +366,7 @@
self.assertEqual(BuildStatus.MANUALDEPWAIT, self.build.status)
self.assertEqual(DEPENDENCIES, self.build.dependencies)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_chrootfail_collection(self):
@@ -375,7 +379,7 @@
self.assertBuildProperties(self.build)
self.assertEqual(BuildStatus.CHROOTWAIT, self.build.status)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_builderfail_collection(self):
@@ -393,7 +397,7 @@
job = self.candidate.specific_job.job
self.assertEqual(JobStatus.WAITING, job.status)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_building_collection(self):
@@ -405,7 +409,7 @@
# The fake log is returned from the BuildingSlave() mock.
self.assertEqual("This is a build log", self.candidate.logtail)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_aborted_collection(self):
@@ -416,7 +420,7 @@
def got_update(ignored):
self.assertEqual(BuildStatus.NEEDSBUILD, self.build.status)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_aborting_collection(self):
@@ -429,7 +433,7 @@
"Waiting for slave process to be terminated",
self.candidate.logtail)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_collection_for_deleted_source(self):
@@ -446,7 +450,7 @@
self.assertEqual(
BuildStatus.SUPERSEDED, self.build.status)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_uploading_collection(self):
@@ -460,7 +464,7 @@
# upload processing succeeded.
self.assertIs(None, self.build.upload_log)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_givenback_collection(self):
@@ -477,7 +481,7 @@
job = self.candidate.specific_job.job
self.assertEqual(JobStatus.WAITING, job.status)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
def test_log_file_collection(self):
@@ -520,12 +524,12 @@
orig_file_content = open(tmp_orig_file_name).read()
self.assertEqual(orig_file_content, uncompressed_file)
- d = removeSecurityProxy(self.builder.slave).getFile(
+ d = removeSecurityProxy(self.interactor.slave).getFile(
'buildlog', tmp_orig_file_name)
return d.addCallback(got_orig_log)
d = removeSecurityProxy(self.behavior).getLogFromSlave(
- self.build, self.build.buildqueue_record)
+ self.build.buildqueue_record)
return d.addCallback(got_log)
def test_private_build_log_storage(self):
@@ -547,7 +551,7 @@
self.layer.txn.commit()
self.assertTrue(self.build.log.restricted)
- d = self.builder.updateBuild(self.candidate)
+ d = self.interactor.updateBuild(self.candidate)
return d.addCallback(got_update)
=== modified file 'lib/lp/translations/model/translationtemplatesbuildbehavior.py'
--- lib/lp/translations/model/translationtemplatesbuildbehavior.py 2013-01-23 06:41:24 +0000
+++ lib/lp/translations/model/translationtemplatesbuildbehavior.py 2013-08-22 01:38:28 +0000
@@ -18,7 +18,6 @@
from twisted.internet import defer
from zope.component import getUtility
from zope.interface import implements
-from zope.security.proxy import removeSecurityProxy
from lp.app.interfaces.launchpad import ILaunchpadCelebrities
from lp.buildmaster.enums import BuildStatus
@@ -52,7 +51,7 @@
raise CannotBuild("Unable to find a chroot for %s" %
distroarchseries.displayname)
chroot_sha1 = chroot.content.sha1
- d = self._builder.slave.cacheFile(logger, chroot)
+ d = self._interactor.slave.cacheFile(logger, chroot)
def got_cache_file(ignored):
cookie = self.buildfarmjob.generateSlaveBuildCookie()
@@ -64,7 +63,7 @@
filemap = {}
- return self._builder.slave.build(
+ return self._interactor.slave.build(
cookie, self.build_type, chroot_sha1, filemap, args)
return d.addCallback(got_cache_file)
@@ -93,7 +92,7 @@
logger.error("Did not find templates tarball in slave output.")
return defer.succeed(None)
- slave = removeSecurityProxy(buildqueue.builder.slave)
+ slave = self._interactor.slave
fd, fname = tempfile.mkstemp()
tarball_file = os.fdopen(fd, 'wb')
@@ -174,6 +173,6 @@
yield self.storeLogFromSlave(build_queue=queue_item)
- yield queue_item.builder.cleanSlave()
+ yield self._interactor.cleanSlave()
queue_item.destroySelf()
transaction.commit()
=== modified file 'lib/lp/translations/tests/test_translationtemplatesbuildbehavior.py'
--- lib/lp/translations/tests/test_translationtemplatesbuildbehavior.py 2013-01-09 07:58:55 +0000
+++ lib/lp/translations/tests/test_translationtemplatesbuildbehavior.py 2013-08-22 01:38:28 +0000
@@ -20,7 +20,10 @@
IBuildFarmJobBehavior,
)
from lp.buildmaster.interfaces.buildqueue import IBuildQueueSet
-from lp.buildmaster.model.builder import BuilderSlave
+from lp.buildmaster.model.builder import (
+ BuilderInteractor,
+ BuilderSlave,
+ )
from lp.buildmaster.tests.mock_slaves import (
SlaveTestHelpers,
WaitingSlave,
@@ -82,7 +85,8 @@
branch=branch)
behavior = IBuildFarmJobBehavior(specific_job)
slave = WaitingSlave()
- behavior._builder = removeSecurityProxy(self.factory.makeBuilder())
+ behavior.setBuilderInteractor(
+ BuilderInteractor(self.factory.makeBuilder()))
self.patch(BuilderSlave, 'makeBuilderSlave', FakeMethod(slave))
if use_fake_chroot:
lf = self.factory.makeLibraryFileAlias()
@@ -139,7 +143,7 @@
def got_dispatch((status, info)):
# call_log lives on the mock WaitingSlave and tells us what
# calls to the slave that the behaviour class made.
- call_log = behavior._builder.slave.call_log
+ call_log = behavior._interactor.slave.call_log
build_params = call_log[-1]
self.assertEqual('build', build_params[0])
build_type = build_params[2]
@@ -174,7 +178,7 @@
buildqueue = FakeBuildQueue(behavior)
path = behavior.templates_tarball_path
# Poke the file we're expecting into the mock slave.
- behavior._builder.slave.valid_file_hashes.append(path)
+ behavior._interactor.slave.valid_file_hashes.append(path)
def got_tarball(filename):
tarball = open(filename, 'r')
@@ -193,20 +197,20 @@
behavior = self.makeBehavior()
behavior._uploadTarball = FakeMethod()
queue_item = FakeBuildQueue(behavior)
- builder = behavior._builder
+ slave = behavior._interactor.slave
d = behavior.dispatchBuildToSlave(queue_item, logging)
def got_dispatch((status, info)):
self.assertEqual(0, queue_item.destroySelf.call_count)
- slave_call_log = behavior._builder.slave.call_log
+ slave_call_log = slave.call_log
self.assertNotIn('clean', slave_call_log)
self.assertEqual(0, behavior._uploadTarball.call_count)
- return builder.slave.status()
+ return slave.status()
def got_status(status):
- slave_call_log = behavior._builder.slave.call_log
+ slave_call_log = slave.call_log
slave_status = {
'builder_status': status[0],
'build_status': status[1],
@@ -219,7 +223,7 @@
self.assertEqual(BuildStatus.FULLYBUILT, behavior.build.status)
# Log file is stored.
self.assertIsNotNone(behavior.build.log)
- slave_call_log = behavior._builder.slave.call_log
+ slave_call_log = slave.call_log
self.assertEqual(1, queue_item.destroySelf.call_count)
self.assertIn('clean', slave_call_log)
self.assertEqual(1, behavior._uploadTarball.call_count)
@@ -234,12 +238,12 @@
behavior = self.makeBehavior()
behavior._uploadTarball = FakeMethod()
queue_item = FakeBuildQueue(behavior)
- builder = behavior._builder
+ slave = behavior._interactor.slave
d = behavior.dispatchBuildToSlave(queue_item, logging)
def got_dispatch((status, info)):
# Now that we've dispatched, get the status.
- return builder.slave.status()
+ return slave.status()
def got_status(status):
raw_status = (
@@ -262,8 +266,7 @@
# Log file is stored.
self.assertIsNotNone(behavior.build.log)
self.assertEqual(1, queue_item.destroySelf.call_count)
- slave_call_log = behavior._builder.slave.call_log
- self.assertIn('clean', slave_call_log)
+ self.assertIn('clean', slave.call_log)
self.assertEqual(0, behavior._uploadTarball.call_count)
d.addCallback(got_dispatch)
@@ -277,11 +280,11 @@
behavior = self.makeBehavior()
behavior._uploadTarball = FakeMethod()
queue_item = FakeBuildQueue(behavior)
- builder = behavior._builder
+ slave = behavior._interactor.slave
d = behavior.dispatchBuildToSlave(queue_item, logging)
def got_dispatch((status, info)):
- return builder.slave.status()
+ return slave.status()
def got_status(status):
raw_status = (
@@ -301,8 +304,7 @@
def build_updated(ignored):
self.assertEqual(BuildStatus.FULLYBUILT, behavior.build.status)
self.assertEqual(1, queue_item.destroySelf.call_count)
- slave_call_log = behavior._builder.slave.call_log
- self.assertIn('clean', slave_call_log)
+ self.assertIn('clean', slave.call_log)
self.assertEqual(0, behavior._uploadTarball.call_count)
d.addCallback(got_dispatch)
@@ -315,7 +317,7 @@
branch = productseries.branch
behavior = self.makeBehavior(branch=branch)
queue_item = FakeBuildQueue(behavior)
- builder = behavior._builder
+ slave = behavior._interactor.slave
d = behavior.dispatchBuildToSlave(queue_item, logging)
@@ -327,10 +329,9 @@
return defer.succeed(None)
def got_dispatch((status, info)):
- builder.slave.getFile = fake_getFile
- builder.slave.filemap = {
- 'translation-templates.tar.gz': 'foo'}
- return builder.slave.status()
+ slave.getFile = fake_getFile
+ slave.filemap = {'translation-templates.tar.gz': 'foo'}
+ return slave.status()
def got_status(status):
slave_status = {
Follow ups