← Back to team overview

sts-sponsors team mailing list archive

[Merge] ~bjornt/maas:perf-test-dont-measure-setup into maas:master

 

Björn Tillenius has proposed merging ~bjornt/maas:perf-test-dont-measure-setup into maas:master.

Commit message:
Make it more explicit what we measure in the performance tests

Before, we measured how long the whole test took to run. That includes
any local setup you do in the test, but also the setup of any fixtures
the test uses.

I also combined measuring the time with, optionally, doing profiling
using cProfile. It's more logical that way, since you want to profile
the thing that you are actually measuring.


Requested reviews:
  MAAS Maintainers (maas-maintainers)

For more details, see:
https://code.launchpad.net/~bjornt/maas/+git/maas/+merge/433506
-- 
Your team MAAS Maintainers is requested to review the proposed merge of ~bjornt/maas:perf-test-dont-measure-setup into maas:master.
diff --git a/src/maasperf/tests/cli/test_machines.py b/src/maasperf/tests/cli/test_machines.py
index 5e3fa46..7d7c01b 100644
--- a/src/maasperf/tests/cli/test_machines.py
+++ b/src/maasperf/tests/cli/test_machines.py
@@ -8,12 +8,12 @@ from httplib2 import Response
 from maascli import api
 from maascli.config import ProfileConfig
 from maascli.parser import get_deepest_subparser, prepare_parser
-from maastesting.perftest import perf_test, perf_tester
+from maastesting.perftest import perf_test
 
 
 @perf_test()
 def test_perf_list_machines_CLI(
-    cli_profile, monkeypatch, cli_machines_api_response
+    perf, cli_profile, monkeypatch, cli_machines_api_response
 ):
     @contextmanager
     def mock_ProfileConfig_enter(*args):
@@ -35,12 +35,13 @@ def test_perf_list_machines_CLI(
     monkeypatch.setattr(api, "http_request", mock_http_response)
 
     args = ["maas", cli_profile["name"], "machines", "read"]
-    parser = prepare_parser(args)
-    with perf_tester.record("test_perf_list_machines_CLI.parse"):
-        options = parser.parse_args(args[1:])
-    if hasattr(options, "execute"):
-        with perf_tester.record("test_perf_list_machines_CLI.execute"):
-            options.execute(options)
-    else:
-        sub_parser = get_deepest_subparser(parser, args[1:])
-        sub_parser.error("too few arguments")
+    with perf.record("test_perf_list_machines_CLI"):
+        parser = prepare_parser(args)
+        with perf.record("test_perf_list_machines_CLI.parse"):
+            options = parser.parse_args(args[1:])
+        if hasattr(options, "execute"):
+            with perf.record("test_perf_list_machines_CLI.execute"):
+                options.execute(options)
+        else:
+            sub_parser = get_deepest_subparser(parser, args[1:])
+            sub_parser.error("too few arguments")
diff --git a/src/maasperf/tests/conftest.py b/src/maasperf/tests/conftest.py
index f44495e..5a784fa 100644
--- a/src/maasperf/tests/conftest.py
+++ b/src/maasperf/tests/conftest.py
@@ -6,6 +6,16 @@ from pytest import fixture
 from maasserver.models.user import get_auth_tokens
 from maasserver.testing.factory import factory as maasserver_factory
 from maasserver.testing.testclient import MAASSensibleOAuthClient
+from maastesting.perftest import perf
+
+__all__ = [
+    "admin_api_client",
+    "api_client",
+    "django_db_setup",
+    "factory",
+    "maas_user",
+    "perf",
+]
 
 
 # override pytest-django's db setup
diff --git a/src/maasperf/tests/maasserver/api/test_machines.py b/src/maasperf/tests/maasserver/api/test_machines.py
index dfc2c36..c2acf71 100644
--- a/src/maasperf/tests/maasserver/api/test_machines.py
+++ b/src/maasperf/tests/maasserver/api/test_machines.py
@@ -7,7 +7,7 @@ from piston3.handler import typemapper
 
 from maasserver.api.machines import MachinesHandler
 from maastesting.http import make_HttpRequest
-from maastesting.perftest import perf_test, profile
+from maastesting.perftest import perf_test
 
 
 class DummyEmitter(Emitter):
@@ -16,18 +16,20 @@ class DummyEmitter(Emitter):
 
 
 @perf_test()
-def test_perf_list_machines_MachineHandler_api_endpoint(admin_api_client):
-    with profile("test_perf_list_machines_MachineHandler_api_endpoint"):
+def test_perf_list_machines_MachineHandler_api_endpoint(
+    perf, admin_api_client
+):
+    with perf.record("test_perf_list_machines_MachineHandler_api_endpoint"):
         admin_api_client.get(reverse("machines_handler"))
 
 
 @perf_test(db_only=True)
-def test_perf_list_machines_MachinesHander_direct_call(admin):
+def test_perf_list_machines_MachinesHander_direct_call(perf, admin):
     handler = MachinesHandler()
     request = make_HttpRequest()
     request.user = admin
 
-    with profile("test_perf_list_machines_MachinesHander_direct_call"):
+    with perf.record("test_perf_list_machines_MachinesHander_direct_call"):
         emitter = DummyEmitter(
             handler.read(request),
             typemapper,
@@ -39,10 +41,10 @@ def test_perf_list_machines_MachinesHander_direct_call(admin):
 
 
 @perf_test(db_only=True)
-def test_perf_list_machines_MachinesHander_only_objects(admin):
+def test_perf_list_machines_MachinesHander_only_objects(perf, admin):
     handler = MachinesHandler()
     request = make_HttpRequest()
     request.user = admin
 
-    with profile("test_perf_list_machines_MachinesHander_only_objects"):
+    with perf.record("test_perf_list_machines_MachinesHander_only_objects"):
         list(handler.read(request))
diff --git a/src/maasperf/tests/maasserver/models/test_machine.py b/src/maasperf/tests/maasserver/models/test_machine.py
index 5175d1f..4a183c1 100644
--- a/src/maasperf/tests/maasserver/models/test_machine.py
+++ b/src/maasperf/tests/maasserver/models/test_machine.py
@@ -6,13 +6,14 @@ from maastesting.perftest import perf_test
 
 
 @perf_test(commit_transaction=True, db_only=True)
-def test_perf_create_machines(factory):
+def test_perf_create_machines(perf, factory):
     # TODO use create machines script
-    for _ in range(30):
-        factory.make_Machine()
+    with perf.record("test_perf_create_machines"):
+        for _ in range(30):
+            factory.make_Machine()
 
 
 @perf_test(db_only=True)
-def test_perf_list_machines():
-
-    list(Machine.objects.all())
+def test_perf_list_machines(perf):
+    with perf.record("test_perf_list_machines"):
+        list(Machine.objects.all())
diff --git a/src/maasperf/tests/maasserver/websockets/test_machines.py b/src/maasperf/tests/maasserver/websockets/test_machines.py
index 7fcf8b5..a642214 100644
--- a/src/maasperf/tests/maasserver/websockets/test_machines.py
+++ b/src/maasperf/tests/maasserver/websockets/test_machines.py
@@ -3,14 +3,14 @@
 
 from maasserver.models import Machine
 from maasserver.websockets.handlers.machine import MachineHandler
-from maastesting.perftest import perf_test, profile
+from maastesting.perftest import perf_test
 
 
 @perf_test(db_only=True)
-def test_perf_list_machines_Websocket_endpoint(admin):
+def test_perf_list_machines_Websocket_endpoint(perf, admin):
     # This should test the websocket calls that are used to load
     # the machine listing page on the initial page load.
-    with profile("test_perf_list_machines_Websocket_endpoint"):
+    with perf.record("test_perf_list_machines_Websocket_endpoint"):
         ws_handler = MachineHandler(admin, {}, None)
         # Extracted from a clean load of labmaas with empty local
         # storage
@@ -27,11 +27,11 @@ def test_perf_list_machines_Websocket_endpoint(admin):
 
 
 @perf_test(db_only=True)
-def test_perf_list_machines_Websocket_endpoint_all(admin):
+def test_perf_list_machines_Websocket_endpoint_all(perf, admin):
     # How long would it take to list all the machines using the
     # websocket without any pagination.
     machine_count = Machine.objects.all().count()
-    with profile("test_perf_list_machines_Websocket_endpoint_all"):
+    with perf.record("test_perf_list_machines_Websocket_endpoint_all"):
         ws_handler = MachineHandler(admin, {}, None)
         # Extracted from a clean load of labmaas with empty local
         # storage
diff --git a/src/maastesting/perftest.py b/src/maastesting/perftest.py
index 666018e..435b854 100644
--- a/src/maastesting/perftest.py
+++ b/src/maastesting/perftest.py
@@ -4,14 +4,14 @@
 """Performance testing related classes and functions for MAAS and its applications"""
 
 
-from contextlib import contextmanager
+from contextlib import contextmanager, ExitStack
 from cProfile import Profile
-from datetime import datetime
 from functools import wraps
 import json
 import os
 import random
 import sys
+import time
 
 from pytest import fixture
 from pytest import main as pytest_main
@@ -36,7 +36,37 @@ def maas_data():
     return None
 
 
-perf_tester = None
+@fixture(scope="session")
+def perf():
+    perf_tester = PerfTester(
+        os.environ.get("GIT_BRANCH"), os.environ.get("GIT_HASH")
+    )
+    yield perf_tester
+    output = os.environ.get("OUTPUT_FILE")
+    if output:
+        with open(output, "w") as f:
+            perf_tester.finish_build(f)
+    else:
+        perf_tester.finish_build(sys.stdout)
+
+
+class Timing:
+    duration = None
+
+    def __init__(self):
+        self.start = time.monotonic()
+
+    def stop(self):
+        assert self.duration is None, "Can't call stop() twice."
+        end = time.monotonic()
+        self.duration = end - self.start
+
+
+@contextmanager
+def measure_time():
+    timing = Timing()
+    yield timing
+    timing.stop()
 
 
 class PerfTester:
@@ -45,18 +75,15 @@ class PerfTester:
     def __init__(self, git_branch, git_hash):
         self.results = {"branch": git_branch, "commit": git_hash, "tests": {}}
 
-    def _record(self, name, start, end):
-        delta = (end - start).total_seconds()
-        self.results["tests"][name] = {"duration": delta}
-
     @contextmanager
     def record(self, name):
-        start = datetime.utcnow()
-        try:
+        with ExitStack() as stack:
+            profiling_tag = os.environ.get("MAAS_PROFILING")
+            if profiling_tag:
+                stack.enter_context(profile(name, profiling_tag))
+            timing = stack.enter_context(measure_time())
             yield
-        finally:
-            end = datetime.utcnow()
-            self._record(name, start, end)
+        self.results["tests"][name] = {"duration": timing.duration}
 
     def finish_build(self, output):
         json.dump(self.results, output)
@@ -80,8 +107,7 @@ def perf_test(commit_transaction=False, db_only=False):
             if django_loaded:
                 save_point = transaction.savepoint()
 
-            with perf_tester.record(fn.__name__):
-                fn(*args, **kwargs)
+            fn(*args, **kwargs)
 
             if save_point and commit_transaction:
                 transaction.savepoint_commit(save_point)
@@ -93,33 +119,17 @@ def perf_test(commit_transaction=False, db_only=False):
     return inner
 
 
-def perf_test_finish(output):
-    if output:
-        with open(output, "w") as f:
-            perf_tester.finish_build(f)
-    else:
-        perf_tester.finish_build(sys.stdout)
-
-
 def run_perf_tests(env):
-    global perf_tester
-
     rand_seed = os.environ.get("MAAS_RAND_SEED")
     random.seed(rand_seed)
 
-    try:
-        cmd_args = sys.argv[1:]
-        perf_tester = PerfTester(env.get("GIT_BRANCH"), env.get("GIT_HASH"))
+    cmd_args = sys.argv[1:]
 
-        pytest_main(
-            args=cmd_args,
-        )
-    finally:
-        perf_test_finish(env.get("OUTPUT_FILE"))
+    pytest_main(args=cmd_args)
 
 
 @contextmanager
-def profile(testname: str):
+def profile(testname: str, profiling_tag: str):
     """Produces profiling info for tests
 
     This functions uses cProfile module to provide deterministic
@@ -139,11 +149,8 @@ def profile(testname: str):
         with perftest.profile("my_test_case"):
             <<block being profiled>>
     """
-    if profiling_tag := os.environ.get("MAAS_PROFILING"):
-        with Profile() as profiler:
-            yield
-        filename = f"{testname}.{profiling_tag}.profile"
-        profiler.dump_stats(filename)
-        print(f"Dumped stats to {filename}")
-    else:
+    with Profile() as profiler:
         yield
+    filename = f"{testname}.{profiling_tag}.profile"
+    profiler.dump_stats(filename)
+    print(f"Dumped stats to {filename}")

Follow ups