Extend run-benchmark script to support human-readable results conversion.
https://bugs.webkit.org/show_bug.cgi?id=149944
Reviewed by Ryosuke Niwa.
Add '--read-results-json' and '--no-adjust-unit' options.
The '--read-results-json' option converts a previously saved results file to a human-readable format.
The '--no-adjust-unit' option skips the automatic unit-scaling (scientific-notation) conversion.
'--platform' defaults to 'osx' and '--browser' defaults to 'safari'.
* Scripts/webkitpy/benchmark_runner/benchmark_results.py:
(BenchmarkResults.format):
(BenchmarkResults._format_tests):
(BenchmarkResults._format_values):
* Scripts/webkitpy/benchmark_runner/benchmark_runner.py:
(BenchmarkRunner.__init__):
(BenchmarkRunner._run_benchmark):
(BenchmarkRunner._dump):
(BenchmarkRunner.show_results):
(BenchmarkRunner._show_results): Deleted.
* Scripts/webkitpy/benchmark_runner/run_benchmark.py:
(parse_args):
(start):
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@190779 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+2015-10-08 Dewei Zhu <dewei_zhu@apple.com>
+
+ Extend run-benchmark script to support human-readable results conversion.
+ https://bugs.webkit.org/show_bug.cgi?id=149944
+
+ Reviewed by Ryosuke Niwa.
+
+ Add '--read-results-json' and '--no-adjust-unit' options.
+ The '--read-results-json' option converts a previously saved results file to a human-readable format.
+ The '--no-adjust-unit' option skips the automatic unit-scaling (scientific-notation) conversion.
+ '--platform' defaults to 'osx' and '--browser' defaults to 'safari'.
+
+ * Scripts/webkitpy/benchmark_runner/benchmark_results.py:
+ (BenchmarkResults.format):
+ (BenchmarkResults._format_tests):
+ (BenchmarkResults._format_values):
+ * Scripts/webkitpy/benchmark_runner/benchmark_runner.py:
+ (BenchmarkRunner.__init__):
+ (BenchmarkRunner._run_benchmark):
+ (BenchmarkRunner._dump):
+ (BenchmarkRunner.show_results):
+ (BenchmarkRunner._show_results): Deleted.
+ * Scripts/webkitpy/benchmark_runner/run_benchmark.py:
+ (parse_args):
+ (start):
+
2015-10-08 Daniel Bates <dabates@apple.com>
Add iOS 9 device builder to WebKit Bot Watcher's Dashboard
self._lint_results(results)
self._results = self._aggregate_results(results)
- def format(self):
- return self._format_tests(self._results)
+ def format(self, scale_unit):
+ return self._format_tests(self._results, scale_unit)
@classmethod
- def _format_tests(self, tests, indent=''):
+ def _format_tests(cls, tests, scale_unit, indent=''):
output = ''
config_name = 'current'
for test_name in sorted(tests.keys()):
output += ':' + metric_name + ':'
if aggregator_name:
output += aggregator_name + ':'
- output += ' ' + self._format_values(metric_name, metric[aggregator_name][config_name]) + '\n'
+ output += ' ' + cls._format_values(metric_name, metric[aggregator_name][config_name], scale_unit) + '\n'
if 'tests' in test:
- output += self._format_tests(test['tests'], indent=(indent + ' ' * len(test_name)))
+ output += cls._format_tests(test['tests'], scale_unit, indent=(indent + ' ' * len(test_name)))
return output
@classmethod
- def _format_values(cls, metric_name, values):
+ def _format_values(cls, metric_name, values, scale_unit):
values = map(float, values)
total = sum(values)
mean = total / len(values)
unit = cls._unit_from_metric(metric_name)
+ if not scale_unit:
+ return ('{mean:.3f}{unit} stdev={delta:.1%}').format(mean=mean, delta=sample_stdev / mean, unit=unit)
+
if unit == 'ms':
unit = 's'
mean = float(mean) / 1000
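As a side note, here is a minimal sketch of the behaviour the new scale_unit flag controls, kept outside the patch. The standalone helper name and the sample numbers are illustrative only; the real _format_values goes on to apply further magnitude adjustment, which is what '--no-adjust-unit' turns off.

    # Illustrative standalone helper mirroring the branch added above; the
    # sample values are made up and the real method also rescales magnitudes.
    def format_value(mean, stdev, unit='ms', scale_unit=True):
        if not scale_unit:
            # Raw mean in its original unit, stdev as a relative percentage.
            return '{mean:.3f}{unit} stdev={delta:.1%}'.format(
                mean=mean, delta=stdev / mean, unit=unit)
        if unit == 'ms':
            unit = 's'
            mean = mean / 1000.0
            stdev = stdev / 1000.0
        return '{mean:.3f}{unit} stdev={delta:.1%}'.format(
            mean=mean, delta=stdev / mean, unit=unit)

    print(format_value(1834.2, 36.7))                    # 1.834s stdev=2.0%
    print(format_value(1834.2, 36.7, scale_unit=False))  # 1834.200ms stdev=2.0%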
class BenchmarkRunner(object):
- def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, device_id=None):
+ def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, scale_unit=True, device_id=None):
try:
plan_file = self._find_plan_file(plan_file)
with open(plan_file, 'r') as fp:
self._http_server_driver.set_device_id(device_id)
self._build_dir = os.path.abspath(build_dir) if build_dir else None
self._output_file = output_file
+ self._scale_unit = scale_unit
self._device_id = device_id
except IOError as error:
_log.error('Can not open plan file: {plan_file} - Error {error}'.format(plan_file=plan_file, error=error))
_log.info('End of {current_iteration} iteration of current benchmark'.format(current_iteration=iteration))
results = self._wrap(results)
self._dump(results, self._output_file if self._output_file else self._plan['output_file'])
- self._show_results(results)
+ self.show_results(results, self._scale_unit)
def execute(self):
with BenchmarkBuilder(self._plan_name, self._plan) as web_root:
json.dump(results, fp)
except IOError as error:
_log.error('Cannot open output file: {output_file} - Error: {error}'.format(output_file=output_file, error=error))
- _log.error('Results are:\n {result}'.format(json.dumps(results)))
+ _log.error('Results are:\n {result}'.format(result=json.dumps(results)))
@classmethod
def _wrap(cls, dicts):
return a + b
@classmethod
- def _show_results(cls, results):
+ def show_results(cls, results, scale_unit=True):
results = BenchmarkResults(results)
- print results.format()
+ print results.format(scale_unit)
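Because _show_results is promoted to the public classmethod show_results, results dumped by an earlier run can be re-formatted without constructing a runner. A minimal sketch, assuming webkitpy is importable (Scripts/ on the path) and a saved results file named 'results.json':

    import json

    from webkitpy.benchmark_runner.benchmark_runner import BenchmarkRunner

    # Pretty-print a saved results file; pass scale_unit=False to keep the
    # values in their original units, mirroring --no-adjust-unit.
    with open('results.json', 'r') as fp:
        BenchmarkRunner.show_results(json.load(fp), scale_unit=False)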
#!/usr/bin/env python
import argparse
+import json
import logging
import platform
import os
parser = argparse.ArgumentParser(description='Automate the browser based performance benchmarks')
parser.add_argument('--output-file', dest='output', default=None)
parser.add_argument('--build-directory', dest='buildDir', help='Path to the browser executable. e.g. WebKitBuild/Release/')
- parser.add_argument('--plan', dest='plan', required=True, help='Benchmark plan to run. e.g. speedometer, jetstream')
- parser.add_argument('--platform', dest='platform', required=True, choices=BrowserDriverFactory.available_platforms())
+ parser.add_argument('--platform', dest='platform', default='osx', choices=BrowserDriverFactory.available_platforms())
# FIXME: Should we add chrome as an option? Well, chrome uses webkit in iOS.
- parser.add_argument('--browser', dest='browser', required=True, choices=BrowserDriverFactory.available_browsers())
+ parser.add_argument('--browser', dest='browser', default='safari', choices=BrowserDriverFactory.available_browsers())
parser.add_argument('--debug', action='store_true')
parser.add_argument('--local-copy', dest='localCopy', help='Path to a local copy of the benchmark. e.g. PerformanceTests/SunSpider/')
parser.add_argument('--count', dest='countOverride', type=int, help='Number of times to run the benchmark. e.g. 5')
parser.add_argument('--device-id', dest='device_id', default=None)
+ parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false')
+ mutual_group = parser.add_mutually_exclusive_group(required=True)
+ mutual_group.add_argument('--read-results-json', dest='json_file', help='Specify file you want to format')
+ mutual_group.add_argument('--plan', dest='plan', help='Benchmark plan to run. e.g. speedometer, jetstream')
args = parser.parse_args()
def start(args):
- runner = BenchmarkRunner(args.plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.device_id)
+ if args.json_file:
+ BenchmarkRunner.show_results(json.load(open(args.json_file, 'r')), args.scale_unit)
+ return
+ runner = BenchmarkRunner(args.plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.scale_unit, args.device_id)
runner.execute()
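For completeness, a sketch of driving the new dispatch in start() directly; the file name is illustrative, and only the attributes read on the '--read-results-json' path are supplied:

    from argparse import Namespace

    from webkitpy.benchmark_runner.run_benchmark import start

    # With --read-results-json, start() never builds a BenchmarkRunner: it
    # loads the JSON, hands it to BenchmarkRunner.show_results(), and returns.
    start(Namespace(json_file='results.json', scale_unit=True))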