launchpad-reviewers team mailing list archive
-
launchpad-reviewers team
-
Mailing list archive
-
Message #23964
[Merge] lp:~cjwatson/launchpad/job-retry-no-oops into lp:launchpad
Colin Watson has proposed merging lp:~cjwatson/launchpad/job-retry-no-oops into lp:launchpad.
Commit message:
Upgrade to lazr.jobrunner 0.16, and test for lack of OOPSes on retry.
Requested reviews:
Launchpad code reviewers (launchpad-reviewers)
For more details, see:
https://code.launchpad.net/~cjwatson/launchpad/job-retry-no-oops/+merge/372556
lazr.jobrunner 0.16 also adds Python 3 support, although we'll need to upgrade to Celery 4.3 or newer before being able to take advantage of that.
--
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~cjwatson/launchpad/job-retry-no-oops into lp:launchpad.
=== modified file 'constraints.txt'
--- constraints.txt 2019-08-23 11:40:51 +0000
+++ constraints.txt 2019-09-10 15:10:56 +0000
@@ -286,7 +286,7 @@
lazr.config==2.2.1
lazr.delegates==2.0.4
lazr.enum==1.1.3
-lazr.jobrunner==0.14
+lazr.jobrunner==0.16
lazr.lifecycle==1.1
lazr.restful==0.20.1
lazr.restfulclient==0.13.2
=== modified file 'lib/lp/services/job/tests/test_runner.py'
--- lib/lp/services/job/tests/test_runner.py 2018-03-30 20:42:14 +0000
+++ lib/lp/services/job/tests/test_runner.py 2019-09-10 15:10:56 +0000
@@ -44,6 +44,7 @@
TwistedJobRunner,
)
from lp.services.log.logger import BufferLogger
+from lp.services.scripts.logger import OopsHandler
from lp.services.timeline.requesttimeline import get_request_timeline
from lp.services.timeout import (
get_default_timeout_function,
@@ -366,18 +367,29 @@
def test_runJobHandleErrors_user_error_no_oops(self):
"""If the job raises a user error, there is no oops."""
+ logging.getLogger().addHandler(OopsHandler('test_runner'))
job = RaisingJobUserError('boom')
runner = JobRunner([job])
runner.runJobHandleError(job)
self.assertEqual(0, len(self.oopses))

+ def test_runJobHandleErrors_retry_error_no_oops(self):
+ """If the job raises a retry error, there is no oops."""
+ logging.getLogger().addHandler(OopsHandler('test_runner'))
+ job = RaisingRetryJob('completion')
+ runner = JobRunner([job])
+ runner.runJobHandleError(job)
+ self.assertEqual(0, len(self.oopses))
+
def test_runJob_raising_retry_error(self):
"""If a job raises a retry_error, it should be re-queued."""
job = RaisingRetryJob('completion')
- runner = JobRunner([job])
+ logger = BufferLogger()
+ logger.setLevel(logging.INFO)
+ runner = JobRunner([job], logger=logger)
self.assertIs(None, job.scheduled_start)
- with self.expectedLog('Scheduling retry due to RetryError'):
- runner.runJob(job, None)
+ self.addCleanup(lambda: self.addDetail('log', logger.content))
+ runner.runJob(job, None)
self.assertEqual(JobStatus.WAITING, job.status)
expected_delay = datetime.now(UTC) + timedelta(minutes=10)
self.assertThat(
@@ -388,6 +400,8 @@
self.assertIsNone(job.lease_expires)
self.assertNotIn(job, runner.completed_jobs)
self.assertIn(job, runner.incomplete_jobs)
+ self.assertIn(
'Scheduling retry due to RetryError', logger.getLogBuffer())

def test_runJob_exceeding_max_retries(self):
"""If a job exceeds maximum retries, it should raise normally."""
Follow ups