237389c24dc139f6c606d1edbb2cf72c5c59621b
[WebKit-https.git] / Tools / Scripts / webkitpy / benchmark_runner / benchmark_runner.py
1 #!/usr/bin/env python
2
3 import json
4 import logging
5 import shutil
6 import signal
7 import subprocess
8 import sys
9 import tempfile
10 import time
11 import types
12 import os
13 import urlparse
14
15 from benchmark_builder import BenchmarkBuilder
16 from benchmark_results import BenchmarkResults
17 from browser_driver.browser_driver_factory import BrowserDriverFactory
18 from http_server_driver.http_server_driver_factory import HTTPServerDriverFactory
19 from utils import timeout
20
21
22 _log = logging.getLogger(__name__)
23
24
class BenchmarkRunner(object):
    """Runs a benchmark plan end to end.

    A plan (JSON) describes the benchmark: where its sources live, which
    test file(s) to load, how many iterations to run and how long each may
    take.  This class builds the benchmark into a web root, serves it from a
    local HTTP server, drives a browser at it, then merges, dumps and
    displays the per-iteration results.
    """

    def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, scale_unit=True, device_id=None):
        """Load the plan and construct the platform-specific drivers.

        Args:
            plan_file: name or path of the plan; resolved via _find_plan_file.
            local_copy: optional local copy overriding the plan's source.
            count_override: optional iteration count overriding the plan's.
            build_dir: browser build directory (made absolute), or None.
            output_file: where to dump results; when falsy, the plan's
                'output_file' entry is used at dump time.
            platform, browser: keys for the driver factories.
            scale_unit: forwarded to result formatting in show_results.
            device_id: forwarded to the HTTP server driver and prepare_env.

        Raises:
            IOError: the plan file could not be opened (logged first).
            ValueError: the plan file is not valid JSON (logged first).
        """
        try:
            plan_file = self._find_plan_file(plan_file)
            with open(plan_file, 'r') as fp:
                self._plan_name = os.path.split(os.path.splitext(plan_file)[0])[1]
                self._plan = json.load(fp)
                if 'options' not in self._plan:
                    self._plan['options'] = {}
                if local_copy:
                    self._plan['local_copy'] = local_copy
                if count_override:
                    self._plan['count'] = count_override
                self._browser_driver = BrowserDriverFactory.create(platform, browser)
                self._http_server_driver = HTTPServerDriverFactory.create(platform)
                self._http_server_driver.set_device_id(device_id)
                self._build_dir = os.path.abspath(build_dir) if build_dir else None
                self._output_file = output_file
                self._scale_unit = scale_unit
                self._device_id = device_id
        except IOError as error:
            _log.error('Can not open plan file: {plan_file} - Error {error}'.format(plan_file=plan_file, error=error))
            raise error
        except ValueError as error:
            _log.error('Plan file: {plan_file} may not follow JSON format - Error {error}'.format(plan_file=plan_file, error=error))
            raise error

    def _find_plan_file(self, plan_file):
        """Resolve a plan name to an existing file path.

        Tries, in order: the given path itself, the same name under the
        bundled 'data/plans' directory, and that candidate with a '.plan'
        suffix appended.  Returns the original argument unchanged when no
        candidate exists, so the caller's open() reports the failure.
        """
        if not os.path.exists(plan_file):
            candidate = os.path.join(os.path.dirname(__file__), 'data/plans', plan_file)
            if os.path.exists(candidate):
                return candidate
            if not candidate.endswith('.plan'):
                candidate += '.plan'
            if os.path.exists(candidate):
                return candidate
        return plan_file

    def _get_result(self, test_url):
        """Fetch one test's result from the HTTP server, letting the browser
        driver attach any additional data; asserts the server exited cleanly."""
        result = self._browser_driver.add_additional_results(test_url, self._http_server_driver.fetch_result())
        assert(not self._http_server_driver.get_return_code())
        return result

    def _run_one_test(self, web_root, test_file):
        """Serve web_root, open test_file in the browser, and wait (up to the
        plan's timeout) for its result; always tears down the browser and the
        HTTP server, even on failure.  Returns the result parsed from JSON."""
        result = None
        try:
            self._http_server_driver.serve(web_root)
            url = urlparse.urljoin(self._http_server_driver.base_url(), self._plan_name + '/' + test_file)
            self._browser_driver.launch_url(url, self._plan['options'], self._build_dir)
            with timeout(self._plan['timeout']):
                result = self._get_result(url)
        finally:
            self._browser_driver.close_browsers()
            self._http_server_driver.kill_server()

        return json.loads(result)

    def _run_benchmark(self, count, web_root):
        """Run `count` iterations of the benchmark rooted at web_root, then
        merge, dump and display the collected results.

        A plan must define either 'entry_point' (single test) or 'test_files'
        (several tests merged into one result per iteration).
        """
        results = []
        debug_outputs = []
        for iteration in range(1, count + 1):
            _log.info('Start the iteration {current_iteration} of {iterations} for current benchmark'.format(current_iteration=iteration, iterations=count))
            try:
                self._browser_driver.prepare_env(self._device_id)

                if 'entry_point' in self._plan:
                    result = self._run_one_test(web_root, self._plan['entry_point'])
                    debug_outputs.append(result.pop('debugOutput', None))
                    assert(result)
                    results.append(result)
                elif 'test_files' in self._plan:
                    run_result = {}
                    for test in self._plan['test_files']:
                        result = self._run_one_test(web_root, test)
                        assert(result)
                        # Strip the debug output *before* merging; otherwise it
                        # leaks into run_result and later collides with the
                        # top-level 'debugOutput' key added at dump time.
                        debug_outputs.append(result.pop('debugOutput', None))
                        run_result = self._merge(run_result, result)

                    results.append(run_result)
                else:
                    raise Exception('Plan does not contain entry_point or test_files')

            finally:
                # Restore the environment even when an iteration fails.
                self._browser_driver.restore_env()

            _log.info('End the iteration {current_iteration} of {iterations} for current benchmark'.format(current_iteration=iteration, iterations=count))

        results = self._wrap(results)
        output_file = self._output_file if self._output_file else self._plan['output_file']
        self._dump(self._merge({'debugOutput': debug_outputs}, results), output_file)
        self.show_results(results, self._scale_unit)

    def execute(self):
        """Build the benchmark into a temporary web root and run the number
        of iterations the plan (or the count override) requests."""
        with BenchmarkBuilder(self._plan_name, self._plan) as web_root:
            self._run_benchmark(int(self._plan['count']), web_root)

    @classmethod
    def _dump(cls, results, output_file):
        """Write results to output_file as JSON; on failure, log the error
        and the results themselves so they are not lost."""
        _log.info('Dumping the results to file {output_file}'.format(output_file=output_file))
        try:
            with open(output_file, 'w') as fp:
                json.dump(results, fp)
        except IOError as error:
            _log.error('Cannot open output file: {output_file} - Error: {error}'.format(output_file=output_file, error=error))
            _log.error('Results are:\n {result}'.format(result=json.dumps(results)))

    @classmethod
    def _wrap(cls, dicts):
        """Fold a list of per-iteration result dicts into one merged dict;
        returns None for an empty list."""
        _log.debug('Merging following results:\n{results}'.format(results=json.dumps(dicts)))
        if not dicts:
            return None
        ret = {}
        for dic in dicts:
            ret = cls._merge(ret, dic)
        _log.debug('Results after merging:\n{result}'.format(result=json.dumps(ret)))
        return ret

    @classmethod
    def _merge(cls, a, b):
        """Recursively merge two results of the same type.

        Dicts merge key-by-key (recursing on shared keys); lists whose
        elements are strings are treated as descriptive metadata (e.g. unit
        names) and taken from `a` verbatim; everything else - numbers and
        lists of measurements - is combined with '+'.
        """
        assert(isinstance(a, type(b)))
        # String lists must be handled before the generic '+' fallback below,
        # which would otherwise concatenate (duplicate) them.
        # u''.__class__ is unicode on Python 2 and str on Python 3.
        if isinstance(a, list) and len(a) and isinstance(a[0], (str, u''.__class__)):
            return a
        if isinstance(a, dict):
            merged = {}
            for key, value in a.items():
                merged[key] = cls._merge(value, b[key]) if key in b else value
            for key, value in b.items():
                merged.setdefault(key, value)
            return merged
        # Numbers add; non-string lists concatenate.
        return a + b

    @classmethod
    def show_results(cls, results, scale_unit=True):
        """Format and print the merged results to stdout."""
        results = BenchmarkResults(results)
        print(results.format(scale_unit))