run_benchmark should have an option to specify the number of runs
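As a rough illustration of the options this adds, here is a minimal sketch of driving the widened constructor from the patch below. The plan name, paths, and platform/browser strings are example values only, and the run-benchmark option parsing that would normally supply them is outside this diff.

# Hypothetical caller, not part of this patch: supplies the new localCopy and
# countOverride arguments alongside the existing ones. Assumes Tools/Scripts is
# on sys.path and a speedometer.plan exists under data/plans.
from webkitpy.benchmark_runner.benchmark_runner import BenchmarkRunner

runner = BenchmarkRunner(
    'speedometer',                    # planFile: resolved via _findPlanFile (example name)
    '/tmp/Speedometer',               # localCopy: overrides plan['local_copy'] (example path)
    5,                                # countOverride: overrides plan['count'], i.e. the number of runs
    '/path/to/WebKitBuild/Release',   # buildDir (example path)
    'results.json',                   # outputFile
    'osx',                            # platform (example value)
    'safari')                         # browser (example value)
runner.execute()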
[WebKit-https.git] / Tools / Scripts / webkitpy / benchmark_runner / benchmark_runner.py
index a9728a37858a1d044c0019e2f88657a76df39c12..faeb85223ca55a6361f44c0ab1e31121a4b507d3 100644 (file)
@@ -23,33 +23,57 @@ _log = logging.getLogger(__name__)
 
 class BenchmarkRunner(object):
 
-    def __init__(self, planFile, buildDir, outputFile, platform, browser):
+    def __init__(self, planFile, localCopy, countOverride, buildDir, outputFile, platform, browser):
         _log.info('Initializing benchmark running')
         try:
+            planFile = self._findPlanFile(planFile)
             with open(planFile, 'r') as fp:
+                self.planName = os.path.split(os.path.splitext(planFile)[0])[1]
                 self.plan = json.load(fp)
+                if localCopy:
+                    self.plan['local_copy'] = localCopy
+                if countOverride:
+                    self.plan['count'] = countOverride
                 self.browserDriver = BrowserDriverFactory.create([platform, browser])
                 self.httpServerDriver = HTTPServerDriverFactory.create([self.plan['http_server_driver']])
-                self.buildDir = os.path.abspath(buildDir)
+                self.buildDir = os.path.abspath(buildDir) if buildDir else None
                 self.outputFile = outputFile
         except IOError:
             _log.error('Cannot open plan file: %s' % planFile)
+            raise
         except ValueError:
             _log.error('Plan file: %s may not follow JSON format' % planFile)
-        except:
             raise
 
+    def _findPlanFile(self, planFile):
+        if not os.path.exists(planFile):
+            absPath = os.path.join(os.path.dirname(__file__), 'data/plans', planFile)
+            if os.path.exists(absPath):
+                return absPath
+            if not absPath.endswith('.plan'):
+                absPath += '.plan'
+            if os.path.exists(absPath):
+                return absPath
+        return planFile
+
     def execute(self):
         _log.info('Start to execute the plan')
         _log.info('Start a new benchmark')
         results = []
         benchmarkBuilder = BenchmarkBuilderFactory.create([self.plan['benchmark_builder']])
-        webRoot = benchmarkBuilder.prepare(self.plan['original_benchmark'], self.plan['benchmark_patch'] if 'benchmark_patch' in self.plan else None)
+
+        if not self.plan.get('local_copy') and not self.plan.get('remote_archive'):
+            _log.error('Either local_copy or remote_archive must be specified in the plan')
+            return 2
+
+        webRoot = benchmarkBuilder.prepare(self.planName, self.plan.get('local_copy'), self.plan.get('remote_archive'),
+            self.plan.get('benchmark_patch'), self.plan.get('create_script'))
         for x in xrange(int(self.plan['count'])):
             _log.info('Start the iteration %d of current benchmark' % (x + 1))
             self.httpServerDriver.serve(webRoot)
             self.browserDriver.prepareEnv()
-            self.browserDriver.launchUrl(urlparse.urljoin(self.httpServerDriver.baseUrl(), self.plan['entry_point']), self.buildDir)
+            url = urlparse.urljoin(self.httpServerDriver.baseUrl(), self.planName + '/' + self.plan['entry_point'])
+            self.browserDriver.launchUrl(url, self.buildDir)
             try:
                 with timeout(self.plan['timeout']):
                     result = json.loads(self.httpServerDriver.fetchResult())
@@ -72,7 +96,7 @@ class BenchmarkRunner(object):
 
     @classmethod
     def dump(cls, results, outputFile):
-        _log.info('Dumpping the results to file')
+        _log.info('Dumping the results to file')
         try:
             with open(outputFile, 'w') as fp:
                 json.dump(results, fp)
@@ -82,11 +106,13 @@ class BenchmarkRunner(object):
 
     @classmethod
     def wrap(cls, dicts):
+        _log.info('Merging the following results:\n%s', json.dumps(dicts))
         if not dicts:
             return None
         ret = {}
         for dic in dicts:
             ret = cls.merge(ret, dic)
+        _log.info('Results after merging:\n%s', json.dumps(ret))
         return ret
 
     @classmethod
@@ -94,9 +120,7 @@ class BenchmarkRunner(object):
         assert(isinstance(a, type(b)))
         argType = type(a)
         # special handle for list type, and should be handle before equal check
-        if argType == types.ListType:
-            return a + b
-        if a == b:
+        if argType == types.ListType and len(a) and (type(a[0]) == types.StringType or type(a[0]) == types.UnicodeType):
             return a
         if argType == types.DictType:
             result = {}