Add "-o/--output" option to startup.py and new_tab.py benchmark scripts to save the...
author    commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Mon, 17 Dec 2018 23:06:47 +0000 (23:06 +0000)
committer commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Mon, 17 Dec 2018 23:06:47 +0000 (23:06 +0000)
https://bugs.webkit.org/show_bug.cgi?id=192385

Patch by Suresh Koppisetty <skoppisetty@apple.com> on 2018-12-17
Reviewed by Ryosuke Niwa.

Sample json output for new tab benchmark script after running for 2 iterations and 2 groups. Values are in milliseconds.
{
        "NewTabBenchmark": {
                "metrics": {
                        "Time": {
                                "current": [
                                        [
                                                410.2939453125,
                                                307.81494140625
                                        ],
                                        [
                                                340.616943359375,
                                                265.94384765625
                                        ]
                                ]
                        }
                }
        }
}

Sample json output for startup time benchmark script after running for 2 iterations. Values are in milliseconds.
{
        "StartupBenchmark": {
                "metrics": {
                        "Time": {
                                "current": [
                                        [
                                                1415.2099609375,
                                                1439.552978515625
                                        ]
                                ]
                        }
                }
        }
}
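
The saved file can be post-processed with the standard json module. Below is a minimal sketch (not part of this patch) that loads a results file and prints a per-group mean; "results.json" and the summary format are hypothetical, while the key layout matches the samples above ("NewTabBenchmark" for new_tab.py, "StartupBenchmark" for startup.py).

import json

# Load a results file written via the new "-o/--output" option, e.g. produced by
# "python new_tab.py -g 2 -o results.json" (results.json is a hypothetical path).
with open('results.json') as results_file:
    results = json.load(results_file)

# "current" holds one list per group; each inner list contains the per-iteration
# times in milliseconds, as in the sample output above.
groups = results['NewTabBenchmark']['metrics']['Time']['current']
for group_number, times in enumerate(groups, start=1):
    mean = sum(times) / len(times)
    print('group {}: mean {:.2f} ms over {} iterations'.format(
        group_number, mean, len(times)))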

* LaunchTime/launch_time.py:
* LaunchTime/new_tab.py:
(NewTabBenchmark.get_test_name):
* LaunchTime/startup.py:
(StartupBenchmark.get_test_name):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@239293 268f45cc-cd09-0410-ab3c-d52691b4dbfc

PerformanceTests/ChangeLog
PerformanceTests/LaunchTime/launch_time.py
PerformanceTests/LaunchTime/new_tab.py
PerformanceTests/LaunchTime/startup.py

diff --git a/PerformanceTests/ChangeLog b/PerformanceTests/ChangeLog
index c264aef..704dec4 100644
@@ -1,5 +1,54 @@
 2018-12-17  Suresh Koppisetty  <skoppisetty@apple.com>
 
+        Add "-o/--output" option to startup.py and new_tab.py benchmark scripts to save the results in json format.
+        https://bugs.webkit.org/show_bug.cgi?id=192385
+
+        Reviewed by Ryosuke Niwa.
+
+        Sample json output for new tab benchmark script after running for 2 iterations and 2 groups. Values are in milliseconds.
+        {
+                "NewTabBenchmark": {
+                        "metrics": {
+                                "Time": {
+                                        "current": [
+                                                [
+                                                        410.2939453125,
+                                                        307.81494140625
+                                                ],
+                                                [
+                                                        340.616943359375,
+                                                        265.94384765625
+                                                ]
+                                        ]
+                                }
+                        }
+                }
+        }
+
+        Sample json output for startup time benchmark script after running for 2 iterations. Values are in milliseconds.
+        {
+                "StartupBenchmark": {
+                        "metrics": {
+                                "Time": {
+                                        "current": [
+                                                [
+                                                        1415.2099609375,
+                                                        1439.552978515625
+                                                ]
+                                        ]
+                                }
+                        }
+                }
+        }
+
+        * LaunchTime/launch_time.py:
+        * LaunchTime/new_tab.py:
+        (NewTabBenchmark.get_test_name):
+        * LaunchTime/startup.py:
+        (StartupBenchmark.get_test_name):
+
+2018-12-17  Suresh Koppisetty  <skoppisetty@apple.com>
+
         Import FeedbackServer only if "-f/--feedback-in-browser" option is enabled.
         https://bugs.webkit.org/show_bug.cgi?id=192378
 
diff --git a/PerformanceTests/LaunchTime/launch_time.py b/PerformanceTests/LaunchTime/launch_time.py
index be84678..9293fd0 100644
@@ -5,6 +5,7 @@ import logging
 from math import sqrt
 from operator import mul
 import os
+import json
 from subprocess import call, check_output
 import sys
 import threading
@@ -82,6 +83,8 @@ class LaunchTimeBenchmark:
         self._app_name = None
         self._verbose = False
         self._feedback_in_browser = False
+        self._save_results_to_json = False
+        self._json_results_path = None
         self._do_not_ignore_first_result = False
         self._iterations = 5
         self._browser_bundle_path = '/Applications/Safari.app'
@@ -109,6 +112,8 @@ class LaunchTimeBenchmark:
             help="print each iteration's time")
         self.argument_parser.add_argument('-f', '--feedback-in-browser', action='store_true',
             help="show benchmark results in browser (default: {})".format(self._feedback_in_browser))
             help="print each iteration's time")
         self.argument_parser.add_argument('-f', '--feedback-in-browser', action='store_true',
             help="show benchmark results in browser (default: {})".format(self._feedback_in_browser))
+        self.argument_parser.add_argument('-o', '--output', type=self._json_results_path,
+            help='saves benchmark results in json format (default: {})'.format(self._json_results_path))
         self.will_parse_arguments()
 
         args = self.argument_parser.parse_args()
@@ -120,6 +125,9 @@ class LaunchTimeBenchmark:
             self._verbose = args.verbose
         if args.feedback_in_browser is not None:
             self._feedback_in_browser = args.feedback_in_browser
+        if args.output:
+            self._save_results_to_json = True
+            self._json_results_path = args.output
         path_len = len(self._browser_bundle_path)
         start_index = self._browser_bundle_path.rfind('/', 0, path_len)
         end_index = self._browser_bundle_path.rfind('.', 0, path_len)
@@ -248,6 +256,9 @@ class LaunchTimeBenchmark:
 
         try:
             group_means = []
+            if self._save_results_to_json:
+                resultsDict = {self.get_test_name(): {"metrics": {"Time": {"current": []}}}}
+
             results_by_iteration_number = [[] for _ in range(self._iterations)]
 
             group = 1
@@ -273,6 +284,9 @@ class LaunchTimeBenchmark:
                 if not self._verbose:
                     print ''
 
+                if self._save_results_to_json:
+                    resultsDict[self.get_test_name()]["metrics"]["Time"]["current"].append(results)
+
                 mean, stdev = self._compute_results(results)
                 self.log_verbose('RESULTS:\n')
                 self.log_verbose('mean: {} ms\n'.format(mean))
@@ -289,6 +303,10 @@ class LaunchTimeBenchmark:
             if self._feedback_in_browser:
                 self.launch_browser()
 
+            if self._save_results_to_json and self._json_results_path:
+                with open(self._json_results_path, "w") as jsonFile:
+                    json.dump(resultsDict, jsonFile, indent=4, separators=(',', ': '))
+
             means_by_iteration_number = []
             if len(results_by_iteration_number) > 1 and not self._do_not_ignore_first_result:
                 results_by_iteration_number = results_by_iteration_number[1:]
@@ -319,3 +337,6 @@ class LaunchTimeBenchmark:
 
     def did_parse_arguments(self, args):
         pass
+
+    def get_test_name(self):
+        return "LaunchTimeBenchmark"
diff --git a/PerformanceTests/LaunchTime/new_tab.py b/PerformanceTests/LaunchTime/new_tab.py
index 161b665..dcef11e 100755
@@ -55,6 +55,9 @@ class NewTabBenchmark(LaunchTimeBenchmark):
     def group_init(self):
         self.launch_browser()
 
+    def get_test_name(self):
+        return "NewTabBenchmark"
+
     def will_parse_arguments(self):
         self.argument_parser.add_argument('-g', '--groups', type=int,
             help='number of groups of iterations to run (default: {})'.format(self.iteration_groups))
diff --git a/PerformanceTests/LaunchTime/startup.py b/PerformanceTests/LaunchTime/startup.py
index 39a6fc3..e8844cd 100755
@@ -24,6 +24,9 @@ class StartupBenchmark(LaunchTimeBenchmark):
         self.quit_browser()
         return result
 
+    def get_test_name(self):
+        return "StartupBenchmark"
+
     @staticmethod
     def ResponseHandler(startup_benchmark):
         class Handler(DefaultLaunchTimeHandler):