Layout test will generate a perf metric file to results dir.
author commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 8 Jan 2019 21:15:04 +0000 (21:15 +0000)
committer commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 8 Jan 2019 21:15:04 +0000 (21:15 +0000)
https://bugs.webkit.org/show_bug.cgi?id=192030
<rdar://problem/32779516>

Patch by Zhifei Fang <zhifei_fang@apple.com> on 2019-01-08
Reviewed by Aakash Jain.

Layout test running time will be collected to a perf metric file.
For now, instead of outputting running time of all tests (which is huge),
we aggregate them by test directories which are at most two levels deep.

* Scripts/webkitpy/layout_tests/controllers/manager.py:
(Manager._end_test_run):
(Manager._output_perf_metrics):
(Manager._print_expectation_line_for_test):
* Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:
(add_test_perf_metric):
(test_perf_metrics):
* Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:
(JSONGeneratorTest.test_test_timings_trie):
(JSONGeneratorTest):
(JSONGeneratorTest.test_test_perf_metrics):
* Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
(RebaselineTest.test_reset_results):
(RebaselineTest.test_missing_results):
(RebaselineTest.test_new_baseline):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@239739 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Tools/ChangeLog
Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py

index 6791ed5..2eb0362 100644 (file)
@@ -1,3 +1,31 @@
+2019-01-08  Zhifei Fang  <zhifei_fang@apple.com>
+  
+        Layout test will generate a perf metric file to results dir.
+        https://bugs.webkit.org/show_bug.cgi?id=192030
+        <rdar://problem/32779516>
+
+        Reviewed by Aakash Jain.
+
+        Layout test running time will be collected to a perf metric file.
+        For now, instead of outputting running time of all tests (which is huge),
+        we aggregate them by test directories which are at most two levels deep.
+
+        * Scripts/webkitpy/layout_tests/controllers/manager.py:
+        (Manager._end_test_run):
+        (Manager._output_perf_metrics):
+        (Manager._print_expectation_line_for_test):
+        * Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:
+        (add_test_perf_metric):
+        (test_perf_metrics):
+        * Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:
+        (JSONGeneratorTest.test_test_timings_trie):
+        (JSONGeneratorTest):
+        (JSONGeneratorTest.test_test_perf_metrics):
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+        (RebaselineTest.test_reset_results):
+        (RebaselineTest.test_missing_results):
+        (RebaselineTest.test_new_baseline):
+
 2019-01-08  Patrick Griffis  <pgriffis@igalia.com>
 
         [WPE][GTK] Add php-json to install-dependencies
index c4e2139..70842e0 100644 (file)
@@ -339,6 +339,7 @@ class Manager(object):
         exit_code = -1
         if not self._options.dry_run:
             self._port.print_leaks_summary()
+            self._output_perf_metrics(end_time - start_time, initial_results)
             self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)
 
             results_path = self._filesystem.join(self._results_directory, "results.html")
@@ -431,6 +432,11 @@ class Manager(object):
                     (result.type != test_expectations.MISSING) and
                     (result.type != test_expectations.CRASH or include_crashes))]
 
+    def _output_perf_metrics(self, run_time, initial_results):
+        perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
+        perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
+        self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))
+
     def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
         """Writes the results of the test run as JSON files into the results
         dir and upload the files to the appengine server.
@@ -569,7 +575,7 @@ class Manager(object):
     def _print_expectation_line_for_test(self, format_string, test):
         line = self._expectations.model().get_expectation_line(test)
         print(format_string.format(test, line.expected_behavior, self._expectations.readable_filename_and_line_number(line), line.original_string or ''))
-    
+
     def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip={}):
         format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
         if tests_to_skip:
index 76918e0..5898139 100644 (file)
@@ -114,12 +114,74 @@ def test_timings_trie(port, individual_test_timings):
     trie = {}
     for test_result in individual_test_timings:
         test = test_result.test_name
-
         add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
 
     return trie
 
 
+def _add_perf_metric_for_test(path, time, tests, depth, depth_limit):
+    """
+    Aggregate test time to result for a given test at a specified depth_limit.
+    """
+    if not "/" in path:
+        tests["tests"][path] = {
+            "metrics": {
+                "Time": {
+                    "current": [time],
+                }}}
+        return
+
+    directory, slash, rest = path.partition("/")
+    if depth == depth_limit:
+        if directory not in tests["tests"]:
+            tests["tests"][directory] = {
+                "metrics": {
+                    "Time": {
+                        "current": [time],
+                    }}}
+        else:
+            tests["tests"][directory]["metrics"]["Time"]["current"][0] += time
+        return
+    else:
+        if directory not in tests["tests"]:
+            tests["tests"][directory] = {
+                "metrics": {
+                    "Time": ["Total", "Arithmetic"],
+                },
+                "tests": {}
+            }
+        _add_perf_metric_for_test(rest, time, tests["tests"][directory], depth + 1, depth_limit)
+
+
+def perf_metrics_for_test(run_time, individual_test_timings):
+    """
+    Output two performance metrics
+    1. run time, which is how much time consumed by the layout tests script
+    2. run time of first-level and second-level of test directories
+    """
+    total_run_time = 0
+
+    for test_result in individual_test_timings:
+        total_run_time += int(1000 * test_result.test_run_time)
+
+    perf_metric = {
+        "layout_tests": {
+            "metrics": {
+                "Time": ["Total", "Arithmetic"],
+            },
+            "tests": {}
+        },
+        "layout_tests_run_time": {
+            "metrics": {
+                "Time": {"current": [run_time]},
+            }}}
+    for test_result in individual_test_timings:
+        test = test_result.test_name
+        # for now, we only send two levels of directories
+        _add_perf_metric_for_test(test, int(1000 * test_result.test_run_time), perf_metric["layout_tests"], 1, 2)
+    return perf_metric
+
+
 # FIXME: We already have a TestResult class in test_results.py
 class TestResult(object):
     """A simple class that represents a single test result."""
index 5a212f1..f6adbb7 100644 (file)
@@ -226,3 +226,42 @@ class JSONGeneratorTest(unittest.TestCase):
         }
 
         self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
+
+    def test_perf_metrics_for_test(self):
+        individual_test_timings = []
+        individual_test_timings.append(json_results_generator.TestResult('foo/bar/baz.html', elapsed_time=1.2))
+        individual_test_timings.append(json_results_generator.TestResult('foo/bar/ba.html', elapsed_time=1.4))
+        individual_test_timings.append(json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
+        metrics = json_results_generator.perf_metrics_for_test(1200, individual_test_timings)
+
+        expected_metrics = {
+            "layout_tests": {
+                "metrics": {
+                    "Time": ["Total", "Arithmetic"],
+                },
+                "tests": {
+                    "foo": {
+                        "metrics": {
+                            "Time": ["Total", "Arithmetic"],
+                        },
+                        "tests": {
+                            "bar":  {
+                                "metrics": {
+                                    "Time": {"current": [2600]},
+                                }
+                            }
+                        }
+                    },
+                    "bar.html": {
+                        "metrics": {
+                            "Time": {"current": [0]},
+                        }
+                    }
+                }
+            },
+            "layout_tests_run_time": {
+                "metrics": {
+                    "Time": {"current": [1200]},
+                }
+            }}
+        self.assertEqual(json.dumps(metrics), json.dumps(expected_metrics))
index bb57a56..4b21ba9 100644 (file)
@@ -879,7 +879,7 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
             tests_included=True, host=host, new_results=True)
         file_list = host.filesystem.written_files.keys()
         self.assertEqual(details.exit_code, 0)
-        self.assertEqual(len(file_list), 8)
+        self.assertEqual(len(file_list), 9)
         self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
         self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
 
@@ -895,7 +895,7 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
             tests_included=True, host=host, new_results=True)
         file_list = host.filesystem.written_files.keys()
         self.assertEqual(details.exit_code, 0)
-        self.assertEqual(len(file_list), 10)
+        self.assertEqual(len(file_list), 11)
         self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
         self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
         self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
@@ -909,7 +909,7 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
             tests_included=True, host=host, new_results=True)
         file_list = host.filesystem.written_files.keys()
         self.assertEqual(details.exit_code, 0)
-        self.assertEqual(len(file_list), 8)
+        self.assertEqual(len(file_list), 9)
         self.assertBaselines(file_list,
             "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
         self.assertBaselines(file_list,