Extend run-benchmark script to support human-readable results conversion.
[WebKit.git] / Tools / Scripts / webkitpy / benchmark_runner / benchmark_runner.py
1 #!/usr/bin/env python
2
3 import json
4 import logging
5 import shutil
6 import signal
7 import subprocess
8 import sys
9 import tempfile
10 import time
11 import types
12 import os
13 import urlparse
14
15 from benchmark_builder import BenchmarkBuilder
16 from benchmark_results import BenchmarkResults
17 from browser_driver.browser_driver_factory import BrowserDriverFactory
18 from http_server_driver.http_server_driver_factory import HTTPServerDriverFactory
19 from utils import timeout
20
21
22 _log = logging.getLogger(__name__)
23
24
class BenchmarkRunner(object):
    """Drives one benchmark plan end to end.

    Builds the benchmark content, serves it from a local HTTP server,
    launches a browser at it for the configured number of iterations, then
    merges the per-iteration JSON results, dumps them to a file, and prints
    a human-readable summary.
    """

    # Python 2 JSON strings decode to unicode; Python 3 has no 'unicode'.
    # Resolved once so _merge's string check is portable across both.
    try:
        _STRING_TYPES = (str, unicode)
    except NameError:  # Python 3
        _STRING_TYPES = (str,)

    def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, scale_unit=True, device_id=None):
        """Load the plan and construct the browser / HTTP-server drivers.

        Raises IOError when the plan file cannot be opened and ValueError
        when it is not valid JSON; both are logged before re-raising.
        """
        try:
            plan_file = self._find_plan_file(plan_file)
            with open(plan_file, 'r') as fp:
                # Plan name is the file name without directory or extension.
                self._plan_name = os.path.split(os.path.splitext(plan_file)[0])[1]
                self._plan = json.load(fp)
                # Command-line overrides take precedence over plan values.
                if local_copy:
                    self._plan['local_copy'] = local_copy
                if count_override:
                    self._plan['count'] = count_override
                self._browser_driver = BrowserDriverFactory.create(platform, browser)
                self._http_server_driver = HTTPServerDriverFactory.create(platform)
                self._http_server_driver.set_device_id(device_id)
                self._build_dir = os.path.abspath(build_dir) if build_dir else None
                self._output_file = output_file
                self._scale_unit = scale_unit
                self._device_id = device_id
        except IOError as error:
            _log.error('Can not open plan file: {plan_file} - Error {error}'.format(plan_file=plan_file, error=error))
            # Bare 'raise' preserves the original traceback (unlike
            # 'raise error', which resets it on Python 2).
            raise
        except ValueError as error:
            _log.error('Plan file: {plan_file} may not follow JSON format - Error {error}'.format(plan_file=plan_file, error=error))
            raise

    def _find_plan_file(self, plan_file):
        """Resolve a plan name or path to an existing plan file.

        Falls back to the bundled 'data/plans' directory, trying the name
        as given and then with a '.plan' extension. Returns the input
        unchanged when nothing matches, so the caller's open() raises the
        natural IOError for the path the user asked for.
        """
        if os.path.exists(plan_file):
            return plan_file
        candidate = os.path.join(os.path.dirname(__file__), 'data/plans', plan_file)
        if os.path.exists(candidate):
            return candidate
        if not candidate.endswith('.plan'):
            candidate += '.plan'
            if os.path.exists(candidate):
                return candidate
        return plan_file

    def _run_benchmark(self, count, web_root):
        """Run 'count' iterations against 'web_root', then merge, dump and
        display the collected results."""
        results = []
        for iteration in range(1, count + 1):
            _log.info('Start the iteration {current_iteration} of current benchmark'.format(current_iteration=iteration))
            try:
                result = None
                self._http_server_driver.serve(web_root)
                self._browser_driver.prepare_env(self._device_id)
                url = urlparse.urljoin(self._http_server_driver.base_url(), self._plan_name + '/' + self._plan['entry_point'])
                self._browser_driver.launch_url(url, self._build_dir)
                with timeout(self._plan['timeout']):
                    result = self._http_server_driver.fetch_result()
                    # NOTE(review): these asserts are stripped under
                    # 'python -O'; kept as asserts to preserve the original
                    # failure mode, with messages added for diagnosability.
                    assert not self._http_server_driver.get_return_code(), 'HTTP server exited abnormally'
                    assert result, 'No result was fetched from the HTTP server'
                    results.append(json.loads(result))
            finally:
                # Always tear down, even on timeout or assertion failure,
                # so the next iteration (or run) starts from a clean state.
                self._browser_driver.restore_env()
                self._browser_driver.close_browsers()
                self._http_server_driver.kill_server()
            _log.info('End of {current_iteration} iteration of current benchmark'.format(current_iteration=iteration))
        results = self._wrap(results)
        self._dump(results, self._output_file if self._output_file else self._plan['output_file'])
        self.show_results(results, self._scale_unit)

    def execute(self):
        """Build the benchmark into a temporary web root and run the plan's
        configured number of iterations against it."""
        with BenchmarkBuilder(self._plan_name, self._plan) as web_root:
            self._run_benchmark(int(self._plan['count']), web_root)

    @classmethod
    def _dump(cls, results, output_file):
        """Serialize 'results' as JSON to 'output_file'.

        Best-effort: an IOError is logged (together with the results, so
        they are not lost) rather than raised.
        """
        _log.info('Dumping the results to file')
        try:
            with open(output_file, 'w') as fp:
                json.dump(results, fp)
        except IOError as error:
            _log.error('Cannot open output file: {output_file} - Error: {error}'.format(output_file=output_file, error=error))
            _log.error('Results are:\n {result}'.format(result=json.dumps(results)))

    @classmethod
    def _wrap(cls, dicts):
        """Fold a list of per-iteration result dicts into a single merged
        dict via _merge; returns None for an empty list."""
        _log.debug('Merging following results:\n{results}'.format(results=json.dumps(dicts)))
        if not dicts:
            return None
        ret = {}
        for dic in dicts:
            ret = cls._merge(ret, dic)
        _log.debug('Results after merging:\n{result}'.format(result=json.dumps(ret)))
        return ret

    @classmethod
    def _merge(cls, a, b):
        """Recursively merge two results of the same type.

        Dicts merge key-wise; lists of strings keep the first copy
        (presumably identical across iterations — this must be checked
        before the generic concatenation below); everything else — numbers
        and lists of values — combines with '+'.
        """
        assert isinstance(a, type(b)), 'Cannot merge values of different types'
        # Special case for lists of strings; must run before the generic
        # list/number handling at the bottom.
        if isinstance(a, list) and len(a) and isinstance(a[0], cls._STRING_TYPES):
            return a
        if isinstance(a, dict):
            merged = {}
            for key, value in a.items():
                merged[key] = cls._merge(value, b[key]) if key in b else value
            for key, value in b.items():
                if key not in merged:
                    merged[key] = value
            return merged
        # Other types: numeric addition or list concatenation.
        return a + b

    @classmethod
    def show_results(cls, results, scale_unit=True):
        """Pretty-print the merged results to stdout."""
        results = BenchmarkResults(results)
        # Parenthesized single-argument form: identical output under the
        # Python 2 print statement, valid under the Python 3 function.
        print(results.format(scale_unit))