← Back to team overview

launchpad-reviewers team mailing list archive

[Merge] ~cjwatson/launchpad:new-threading-api into launchpad:master

 

Colin Watson has proposed merging ~cjwatson/launchpad:new-threading-api into launchpad:master.

Commit message:
Use Python 2.6's new threading API

This involves some changes to use properties instead of accessor
methods, and some changes from camelCase to snake_case.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~cjwatson/launchpad/+git/launchpad/+merge/374028
-- 
Your team Launchpad code reviewers is requested to review the proposed merge of ~cjwatson/launchpad:new-threading-api into launchpad:master.
diff --git a/bzrplugins/lpserve/__init__.py b/bzrplugins/lpserve/__init__.py
index 5ea5f86..1db2554 100644
--- a/bzrplugins/lpserve/__init__.py
+++ b/bzrplugins/lpserve/__init__.py
@@ -627,7 +627,7 @@ class LPForkingService(object):
         trace.note('Exiting')
 
     def _do_loop(self):
-        while not self._should_terminate.isSet():
+        while not self._should_terminate.is_set():
             try:
                 conn, client_addr = self._server_socket.accept()
             except self._socket_timeout:
diff --git a/bzrplugins/lpserve/test_lpserve.py b/bzrplugins/lpserve/test_lpserve.py
index fd5baa7..6879ee6 100644
--- a/bzrplugins/lpserve/test_lpserve.py
+++ b/bzrplugins/lpserve/test_lpserve.py
@@ -95,10 +95,10 @@ class TestingLPForkingServiceInAThread(lpserve.LPForkingService):
         thread = threading.Thread(target=new_service.main_loop,
                                   name='TestingLPForkingServiceInAThread')
         new_service.this_thread = thread
-        # should we be doing thread.setDaemon(True) ?
+        # should we be doing thread.daemon = True ?
         thread.start()
         new_service.service_started.wait(10.0)
-        if not new_service.service_started.isSet():
+        if not new_service.service_started.is_set():
             raise RuntimeError(
                 'Failed to start the TestingLPForkingServiceInAThread')
         test.addCleanup(new_service.stop_service)
@@ -112,7 +112,7 @@ class TestingLPForkingServiceInAThread(lpserve.LPForkingService):
             return
         self._should_terminate.set()
         self.service_stopped.wait(10.0)
-        if not self.service_stopped.isSet():
+        if not self.service_stopped.is_set():
             raise RuntimeError(
                 'Failed to stop the TestingLPForkingServiceInAThread')
         self.this_thread.join()
@@ -213,7 +213,7 @@ class TestLPForkingService(TestCaseWithLPForkingService):
         response = self.send_message_to_service('quit\n')
         self.assertEqual('ok\nquit command requested... exiting\n', response)
         self.service.service_stopped.wait(10.0)
-        self.assertTrue(self.service.service_stopped.isSet())
+        self.assertTrue(self.service.service_stopped.is_set())
 
     def test_send_invalid_message_fails(self):
         response = self.send_message_to_service('unknown\n')
diff --git a/lib/lp/bugs/scripts/checkwatches/core.py b/lib/lp/bugs/scripts/checkwatches/core.py
index 66e2d1f..5d47b6e 100644
--- a/lib/lp/bugs/scripts/checkwatches/core.py
+++ b/lib/lp/bugs/scripts/checkwatches/core.py
@@ -174,15 +174,15 @@ class CheckwatchesMaster(WorkingBase):
         def make_updater(bug_tracker_name, bug_tracker_id):
             """Returns a function that can update the given bug tracker."""
             def updater(batch_size=None):
-                thread = threading.currentThread()
-                thread_name = thread.getName()
-                thread.setName(bug_tracker_name)
+                thread = threading.current_thread()
+                thread_name = thread.name
+                thread.name = bug_tracker_name
                 try:
                     with self.statement_logging:
                         return self.updateBugTracker(
                             bug_tracker_id, batch_size)
                 finally:
-                    thread.setName(thread_name)
+                    thread.name = thread_name
             return updater
 
         for bug_tracker_name in bug_tracker_names:
diff --git a/lib/lp/bugs/scripts/checkwatches/tests/test_core.py b/lib/lp/bugs/scripts/checkwatches/tests/test_core.py
index b7fea39..f92f606 100644
--- a/lib/lp/bugs/scripts/checkwatches/tests/test_core.py
+++ b/lib/lp/bugs/scripts/checkwatches/tests/test_core.py
@@ -410,7 +410,7 @@ class OutputFileForThreads:
         self.lock = threading.Lock()
 
     def write(self, data):
-        thread_name = threading.currentThread().getName()
+        thread_name = threading.current_thread().name
         with self.lock:
             if thread_name in self.output:
                 self.output[thread_name].append(data)
diff --git a/lib/lp/codehosting/codeimport/tests/servers.py b/lib/lp/codehosting/codeimport/tests/servers.py
index 1b911d9..20388fc 100644
--- a/lib/lp/codehosting/codeimport/tests/servers.py
+++ b/lib/lp/codehosting/codeimport/tests/servers.py
@@ -300,7 +300,7 @@ class HTTPGitServerThread(threading.Thread):
 
     def __init__(self, backend, address, port=None):
         super(HTTPGitServerThread, self).__init__()
-        self.setName("HTTP Git server on %s:%s" % (address, port))
+        self.name = "HTTP Git server on %s:%s" % (address, port)
         app = HTTPGitApplication(
             backend,
             handlers={'turnip-set-symbolic-ref': TurnipSetSymbolicRefHandler})
diff --git a/lib/lp/scripts/garbo.py b/lib/lp/scripts/garbo.py
index 1795f5a..b6b5f27 100644
--- a/lib/lp/scripts/garbo.py
+++ b/lib/lp/scripts/garbo.py
@@ -1762,7 +1762,7 @@ class BaseDatabaseGarbageCollector(LaunchpadCronScript):
         has timed out.
         """
         self.logger.debug(
-            "Worker thread %s running.", threading.currentThread().name)
+            "Worker thread %s running.", threading.current_thread().name)
         install_feature_controller(make_script_feature_controller(self.name))
         self.login()
 
@@ -1772,7 +1772,7 @@ class BaseDatabaseGarbageCollector(LaunchpadCronScript):
                 # Exit silently. We warn later.
                 self.logger.debug(
                     "Worker thread %s detected script timeout.",
-                    threading.currentThread().name)
+                    threading.current_thread().name)
                 break
 
             try:
diff --git a/lib/lp/services/database/doc/storm-store-reset.txt b/lib/lp/services/database/doc/storm-store-reset.txt
index d462f80..990f9c1 100644
--- a/lib/lp/services/database/doc/storm-store-reset.txt
+++ b/lib/lp/services/database/doc/storm-store-reset.txt
@@ -23,7 +23,7 @@ we rely on that to find out whether or not to reset stores.
     >>> def request_salgados_homepage():
     ...     global alive_items
     ...     global thread_name
-    ...     thread_name = threading.currentThread().getName()
+    ...     thread_name = threading.current_thread().name
     ...     from lp.testing.pages import setupBrowser
    ...     http = setupBrowser(auth="Basic foo.bar@canonical.com:test")
    ...     http.open("http://launchpad.test/~salgado/+edit")
diff --git a/lib/lp/services/gpg/tests/test_gpghandler.py b/lib/lp/services/gpg/tests/test_gpghandler.py
index a818cf0..ca24c3b 100644
--- a/lib/lp/services/gpg/tests/test_gpghandler.py
+++ b/lib/lp/services/gpg/tests/test_gpghandler.py
@@ -166,7 +166,7 @@ class TestGPGHandler(TestCase):
         # raises GPGKeyTemporarilyNotFoundError.
         # We simulate a timeout using responses rather than by setting a low
         # timeout, as otherwise the test will fail if the fetch thread
-        # happens to complete between Thread.start and Thread.isAlive.
+        # happens to complete between Thread.start and Thread.is_alive.
         responses.add(
             'GET',
             self.gpg_handler.getURLForKeyInServer(
diff --git a/lib/lp/services/profile/mem.py b/lib/lp/services/profile/mem.py
index 67ce8d1..19bc988 100644
--- a/lib/lp/services/profile/mem.py
+++ b/lib/lp/services/profile/mem.py
@@ -210,7 +210,7 @@ def logInThread(n=30):
     reflog = file('/tmp/refs.log', 'w')
     t = threading.Thread(target=_logRefsEverySecond, args=(reflog, n))
     # Allow process to exit without explicitly stopping thread.
-    t.setDaemon(True)
+    t.daemon = True
     t.start()
 
 
diff --git a/lib/lp/services/profile/profile.py b/lib/lp/services/profile/profile.py
index 8a89c57..7516045 100644
--- a/lib/lp/services/profile/profile.py
+++ b/lib/lp/services/profile/profile.py
@@ -321,7 +321,7 @@ def end_request(event):
             oops_report = request.oops
         filename = '%s-%s-%s-%s' % (
             timestamp, pageid, oopsid,
-            threading.currentThread().getName())
+            threading.current_thread().name)
         if 'callgrind' in actions:
             # The stats class looks at the filename to know to use
             # callgrind syntax.
diff --git a/lib/lp/services/scripts/__init__.py b/lib/lp/services/scripts/__init__.py
index f8eeb42..51456c2 100644
--- a/lib/lp/services/scripts/__init__.py
+++ b/lib/lp/services/scripts/__init__.py
@@ -101,7 +101,7 @@ def execute_zcml_for_scripts(use_web_security=False):
                 thread, zope.sendmail.delivery.QueueProcessorThread):
                 thread.stop()
                 thread.join(30)
-                if thread.isAlive():
+                if thread.is_alive():
                     raise RuntimeError(
                         "QueueProcessorThread did not shut down")
     atexit.register(kill_queue_processor_threads)
diff --git a/lib/lp/services/timeout.py b/lib/lp/services/timeout.py
index 1c77d37..d96456f 100644
--- a/lib/lp/services/timeout.py
+++ b/lib/lp/services/timeout.py
@@ -232,10 +232,10 @@ class with_timeout:
                 # This will commonly be SoftTimeLimitExceeded from celery,
                 # since celery's timeout often happens before the job's due
                 # to job setup time.
-                if t.isAlive():
+                if t.is_alive():
                     cleanup(t, args)
                 raise
-            if t.isAlive():
+            if t.is_alive():
                 cleanup(t, args)
                 raise TimeoutError("timeout exceeded.")
             if getattr(t, 'exc_info', None) is not None:
diff --git a/lib/lp/services/webapp/adapter.py b/lib/lp/services/webapp/adapter.py
index 08a22c4..e4cfcc3 100644
--- a/lib/lp/services/webapp/adapter.py
+++ b/lib/lp/services/webapp/adapter.py
@@ -710,7 +710,7 @@ class LaunchpadStatementTracer:
                 sys.stderr.write("." * 70 + "\n")
         # store the last executed statement as an attribute on the current
         # thread
-        threading.currentThread().lp_last_sql_statement = statement
+        threading.current_thread().lp_last_sql_statement = statement
         request_starttime = getattr(_local, 'request_start_time', None)
         if request_starttime is None:
             if print_traceback or self._debug_sql or log_sql is not None:
diff --git a/lib/lp/services/webapp/publication.py b/lib/lp/services/webapp/publication.py
index 1b01f53..ece870a 100644
--- a/lib/lp/services/webapp/publication.py
+++ b/lib/lp/services/webapp/publication.py
@@ -768,7 +768,7 @@ class LaunchpadBrowserPublication(
                     OpStats.stats['5XXs_b'] += 1
 
         # Make sure our databases are in a sane state for the next request.
-        thread_name = threading.currentThread().getName()
+        thread_name = threading.current_thread().name
         for name, store in getUtility(IZStorm).iterstores():
             try:
                 assert store._connection._state != STATE_DISCONNECTED, (
diff --git a/lib/lp/services/webapp/sigusr1.py b/lib/lp/services/webapp/sigusr1.py
index 07ec112..1ef7c80 100644
--- a/lib/lp/services/webapp/sigusr1.py
+++ b/lib/lp/services/webapp/sigusr1.py
@@ -21,7 +21,7 @@ def sigusr1_handler(signum, frame):
         # isn't an appserver thread.
         if not hasattr(thread, 'lp_last_request'):
             continue
-        message.append('\t%s' % thread.getName())
+        message.append('\t%s' % thread.name)
         message.append('\t\tLast Request: %s' % thread.lp_last_request)
         message.append('\t\tMost recent OOPS IDs: %s' %
                        ', '.join(getattr(thread, 'lp_last_oops', [])))
@@ -38,7 +38,7 @@ def setup_sigusr1(event):
 def before_traverse(event):
     """Record the request URL (provided that the request has a URL)"""
     request = event.request
-    threading.currentThread().lp_last_request = str(
+    threading.current_thread().lp_last_request = str(
         getattr(request, 'URL', ''))
 
 
@@ -46,7 +46,7 @@ def end_request(event):
     """Record the OOPS ID in the thread, if one occurred."""
     request = event.request
     if request.oopsid is not None:
-        thread = threading.currentThread()
+        thread = threading.current_thread()
         last_oops_ids = getattr(thread, 'lp_last_oops', [])
         # make sure the OOPS ID list has at most 5 elements
         thread.lp_last_oops = last_oops_ids[-4:] + [request.oopsid]
diff --git a/lib/lp/testing/layers.py b/lib/lp/testing/layers.py
index 647fbb1..0f00f62 100644
--- a/lib/lp/testing/layers.py
+++ b/lib/lp/testing/layers.py
@@ -421,7 +421,7 @@ class BaseLayer:
         def new_live_threads():
             return [
                 thread for thread in threading.enumerate()
-                    if thread not in BaseLayer._threads and thread.isAlive()]
+                    if thread not in BaseLayer._threads and thread.is_alive()]
 
         if BaseLayer.disable_thread_check:
             new_threads = None
@@ -434,7 +434,7 @@ class BaseLayer:
                 has_live_threads = False
                 for new_thread in new_threads:
                     new_thread.join(0.1)
-                    if new_thread.isAlive():
+                    if new_thread.is_alive():
                         has_live_threads = True
                 if has_live_threads:
                     # Trigger full garbage collection that might be