Make run-benchmark script support the 'config' key in test plans.
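For illustration, a plan that uses the new key might look roughly like the sketch below (shown as a Python dictionary; on disk a plan is a JSON file under data/plans/). Only keys this runner itself reads are shown, and the 'orientation' entry is a made-up example setting; real plans also carry keys consumed by BenchmarkBuilder, and the only entry the runner adds to 'config' on its own is 'device_id'.

    example_plan = {
        'timeout': 600,
        'count': 5,
        'entry_point': 'index.html',
        'output_file': 'results.json',
        'config': {'orientation': 'landscape'},  # handed to browser_driver.prepare_env() each iteration
    }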
Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py
#!/usr/bin/env python

import json
import logging
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import types
import os
import urlparse

from benchmark_builder import BenchmarkBuilder
from benchmark_results import BenchmarkResults
from browser_driver.browser_driver_factory import BrowserDriverFactory
from http_server_driver.http_server_driver_factory import HTTPServerDriverFactory
from utils import timeout


_log = logging.getLogger(__name__)


class BenchmarkRunner(object):

    def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, scale_unit=True, device_id=None):
        try:
            plan_file = self._find_plan_file(plan_file)
            with open(plan_file, 'r') as fp:
                self._plan_name = os.path.split(os.path.splitext(plan_file)[0])[1]
                self._plan = json.load(fp)
                if not 'options' in self._plan:
                    self._plan['options'] = {}
                if local_copy:
                    self._plan['local_copy'] = local_copy
                if count_override:
                    self._plan['count'] = count_override
                self._browser_driver = BrowserDriverFactory.create(platform, browser)
                self._http_server_driver = HTTPServerDriverFactory.create(platform)
                self._http_server_driver.set_device_id(device_id)
                self._build_dir = os.path.abspath(build_dir) if build_dir else None
                self._output_file = output_file
                self._scale_unit = scale_unit
                self._config = self._plan.get('config', {})
                if device_id:
                    self._config['device_id'] = device_id
        except IOError as error:
            _log.error('Cannot open plan file: {plan_file} - Error {error}'.format(plan_file=plan_file, error=error))
            raise error
        except ValueError as error:
            _log.error('Plan file: {plan_file} may not follow JSON format - Error {error}'.format(plan_file=plan_file, error=error))
            raise error

    def _find_plan_file(self, plan_file):
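        # If the given path does not exist, look for the plan under data/plans/
        # next to this script, first as given and then with a '.plan' suffix;
        # fall back to returning the original argument unchanged.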
        if not os.path.exists(plan_file):
            absPath = os.path.join(os.path.dirname(__file__), 'data/plans', plan_file)
            if os.path.exists(absPath):
                return absPath
            if not absPath.endswith('.plan'):
                absPath += '.plan'
            if os.path.exists(absPath):
                return absPath
        return plan_file

    def _get_result(self, test_url):
        result = self._browser_driver.add_additional_results(test_url, self._http_server_driver.fetch_result())
        assert(not self._http_server_driver.get_return_code())
        return result

    def _run_one_test(self, web_root, test_file):
        result = None
        try:
            self._http_server_driver.serve(web_root)
            url = urlparse.urljoin(self._http_server_driver.base_url(), self._plan_name + '/' + test_file)
            self._browser_driver.launch_url(url, self._plan['options'], self._build_dir)
            with timeout(self._plan['timeout']):
                result = self._get_result(url)
        finally:
            self._browser_driver.close_browsers()
            self._http_server_driver.kill_server()

        return json.loads(result)

    def _run_benchmark(self, count, web_root):
        results = []
        debug_outputs = []
        for iteration in xrange(1, count + 1):
            _log.info('Start the iteration {current_iteration} of {iterations} for current benchmark'.format(current_iteration=iteration, iterations=count))
            try:
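                # prepare_env() receives the plan's optional 'config' dictionary,
                # plus the 'device_id' entry injected in __init__ when a device id was given.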
                self._browser_driver.prepare_env(self._config)

                if 'entry_point' in self._plan:
                    result = self._run_one_test(web_root, self._plan['entry_point'])
                    debug_outputs.append(result.pop('debugOutput', None))
                    assert(result)
                    results.append(result)
                elif 'test_files' in self._plan:
                    run_result = {}
                    for test in self._plan['test_files']:
                        result = self._run_one_test(web_root, test)
                        assert(result)
                        run_result = self._merge(run_result, result)
                        debug_outputs.append(result.pop('debugOutput', None))

                    results.append(run_result)
                else:
                    raise Exception('Plan does not contain entry_point or test_files')

            finally:
                self._browser_driver.restore_env()

            _log.info('End the iteration {current_iteration} of {iterations} for current benchmark'.format(current_iteration=iteration, iterations=count))

        results = self._wrap(results)
        output_file = self._output_file if self._output_file else self._plan['output_file']
        self._dump(self._merge({'debugOutput': debug_outputs}, results), output_file)
        self.show_results(results, self._scale_unit)

    def execute(self):
        with BenchmarkBuilder(self._plan_name, self._plan) as web_root:
            self._run_benchmark(int(self._plan['count']), web_root)

    @classmethod
    def _dump(cls, results, output_file):
        _log.info('Dumping the results to file {output_file}'.format(output_file=output_file))
        try:
            with open(output_file, 'w') as fp:
                json.dump(results, fp)
        except IOError as error:
            _log.error('Cannot open output file: {output_file} - Error: {error}'.format(output_file=output_file, error=error))
            _log.error('Results are:\n {result}'.format(result=json.dumps(results)))

    @classmethod
    def _wrap(cls, dicts):
        _log.debug('Merging following results:\n{results}'.format(results=json.dumps(dicts)))
        if not dicts:
            return None
        ret = {}
        for dic in dicts:
            ret = cls._merge(ret, dic)
        _log.debug('Results after merging:\n{result}'.format(result=json.dumps(ret)))
        return ret

    @classmethod
    def _merge(cls, a, b):
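        # Merge semantics of the code below: dictionaries are merged recursively,
        # a list whose first element is a string is kept from the left-hand operand
        # only, and any other values are combined with '+'.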
        assert(isinstance(a, type(b)))
        arg_type = type(a)
        # Special-case lists of strings before the other type checks: keep the left-hand copy rather than concatenating.
        if arg_type == types.ListType and len(a) and (type(a[0]) == types.StringType or type(a[0]) == types.UnicodeType):
            return a
        if arg_type == types.DictType:
            result = {}
            for key, value in a.items():
                if key in b:
                    result[key] = cls._merge(value, b[key])
                else:
                    result[key] = value
            for key, value in b.items():
                if key not in result:
                    result[key] = value
            return result
        # For other types (numbers, non-string lists), combine with '+'.
        return a + b

    @classmethod
    def show_results(cls, results, scale_unit=True):
        results = BenchmarkResults(results)
        print results.format(scale_unit)
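As a rough usage sketch, assuming 'osx' and 'safari' are among the registered factory names (in practice run-benchmark's argument parser supplies these values):

    runner = BenchmarkRunner('speedometer.plan', local_copy=None, count_override=None,
                             build_dir=None, output_file='results.json',
                             platform='osx', browser='safari')
    runner.execute()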