From bcb994365d74c68c00bbd055557a773b6315d2c3 Mon Sep 17 00:00:00 2001 From: "commit-queue@webkit.org" Date: Wed, 29 Apr 2015 00:51:36 +0000 Subject: [PATCH] Increase stability of run-benchmark script https://bugs.webkit.org/show_bug.cgi?id=144361 Patch by Dewei Zhu on 2015-04-28 Reviewed by Ryosuke Niwa. * Scripts/run-benchmark: (main): * Scripts/webkitpy/benchmark_runner/README.md: Update readme due to changes. * Scripts/webkitpy/benchmark_runner/benchmark_builder/generic_benchmark_builder.py: (GenericBenchmarkBuilder.clean): * Scripts/webkitpy/benchmark_runner/benchmark_runner.py: Improve error handling and remove an unnecessary loop. (BenchmarkRunner.__init__): (BenchmarkRunner.execute): * Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py: (OSXSafariDriver.prepareEnv): Remove saved sessions of Safari. * Scripts/webkitpy/benchmark_runner/data/patches/JetStream.patch: Upload 'results' rather than 'time'. * Scripts/webkitpy/benchmark_runner/data/plans/jetstream.plan: Update format. * Scripts/webkitpy/benchmark_runner/data/plans/speedometer.plan: Update format. * Scripts/webkitpy/benchmark_runner/http_server_driver/http_server/twisted_http_server.py: (ServerControl.render_POST): Flush the buffer to make sure we can pass the result to stdout. * Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py: (HTTPServerDriver.fetchResult): (HTTPServerDriver): (HTTPServerDriver.killServer): Add kill server interface. (HTTPServerDriver.getReturnCode): Add get return code interface. * Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py: Adjust to support new interfaces. (SimpleHTTPServerDriver.serve): (SimpleHTTPServerDriver.fetchResult): (SimpleHTTPServerDriver): (SimpleHTTPServerDriver.killServer): (SimpleHTTPServerDriver.getReturnCode): * Scripts/webkitpy/benchmark_runner/utils.py: (forceRemove): Provide python version of 'rm -rf'. 
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@183520 268f45cc-cd09-0410-ab3c-d52691b4dbfc --- Tools/ChangeLog | 36 ++++++++++++ Tools/Scripts/run-benchmark | 2 +- .../webkitpy/benchmark_runner/README.md | 36 +++++------- .../generic_benchmark_builder.py | 4 +- .../benchmark_runner/benchmark_runner.py | 56 ++++++++++--------- .../browser_driver/osx_safari_driver.py | 3 + .../data/patches/JetStream.patch | 2 +- .../data/plans/jetstream.plan | 18 +++--- .../data/plans/speedometer.plan | 20 +++---- .../http_server/twisted_http_server.py | 1 + .../http_server_driver/http_server_driver.py | 8 +++ .../simple_http_server_driver.py | 15 ++++- .../webkitpy/benchmark_runner/utils.py | 8 +++ 13 files changed, 131 insertions(+), 78 deletions(-) diff --git a/Tools/ChangeLog b/Tools/ChangeLog index 02eced954607..71543f1ec04f 100644 --- a/Tools/ChangeLog +++ b/Tools/ChangeLog @@ -1,3 +1,39 @@ +2015-04-28 Dewei Zhu + + Increase stablility of run-benchmark script + https://bugs.webkit.org/show_bug.cgi?id=144361 + + Reviewed by Ryosuke Niwa. + + * Scripts/run-benchmark: + (main): + * Scripts/webkitpy/benchmark_runner/README.md: Update readme due to changes. + * Scripts/webkitpy/benchmark_runner/benchmark_builder/generic_benchmark_builder.py: + (GenericBenchmarkBuilder.clean): + * Scripts/webkitpy/benchmark_runner/benchmark_runner.py: Improve error handling and remove an unnecessary loop. + (BenchmarkRunner.__init__): + (BenchmarkRunner.execute): + * Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py: + (OSXSafariDriver.prepareEnv): Remove saved sessions of Safari. + * Scripts/webkitpy/benchmark_runner/data/patches/JetStream.patch: Upload 'results' rather than 'time'. + * Scripts/webkitpy/benchmark_runner/data/plans/jetstream.plan: Update formate. + * Scripts/webkitpy/benchmark_runner/data/plans/speedometer.plan: Update formate. 
+ * Scripts/webkitpy/benchmark_runner/http_server_driver/http_server/twisted_http_server.py: + (ServerControl.render_POST): Flush the buffer to make sure we can pass the result to stdout. + * Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py: + (HTTPServerDriver.fetchResult): + (HTTPServerDriver): + (HTTPServerDriver.killServer): Add kill server interface. + (HTTPServerDriver.getReturnCode): Add get return code interface. + * Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py: Adjust to support new interfaces. + (SimpleHTTPServerDriver.serve): + (SimpleHTTPServerDriver.fetchResult): + (SimpleHTTPServerDriver): + (SimpleHTTPServerDriver.killServer): + (SimpleHTTPServerDriver.getReturnCode): + * Scripts/webkitpy/benchmark_runner/utils.py: + (forceRemove): Provide python version of 'rm -rf'. + 2015-04-28 Sam Weinig [Content Extensions] Process NFAs individually to avoid having all NFAs live at the same time diff --git a/Tools/Scripts/run-benchmark b/Tools/Scripts/run-benchmark index 5a575379d876..3b0629aa1289 100755 --- a/Tools/Scripts/run-benchmark +++ b/Tools/Scripts/run-benchmark @@ -31,7 +31,7 @@ def main(): _log.debug('\tbuild directory\t: %s' % args.buildDir) _log.debug('\tplan name\t: %s', args.plan) runner = BenchmarkRunner(args.plan, args.buildDir, args.output, args.platform, args.browser) - runner.execute() + return runner.execute() if __name__ == '__main__': main() diff --git a/Tools/Scripts/webkitpy/benchmark_runner/README.md b/Tools/Scripts/webkitpy/benchmark_runner/README.md index 52350dfdef81..0cad4a198e3e 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/README.md +++ b/Tools/Scripts/webkitpy/benchmark_runner/README.md @@ -45,7 +45,7 @@ benchmark_runner ## HOW TOs ### How to run ```shell - python path/to/run_benchmark --build-directory path/to/browser/directory --plan json_format_plan --platform target_platform --browser target_browser + python path/to/run-benchmark --build-directory 
path/to/browser/directory --plan json_format_plan --platform target_platform --browser target_browser ``` * **path/to/browser/directory**: should be the folder containing the executable binary(e.g. /Application/ on OSX which contains Safari.app) * **json_format_plan**: the benchmark plan you want to execute @@ -55,29 +55,24 @@ To create a plan, you may refer to Plans/jetstream.plan. ```json { "http_server_driver": "SimpleHTTPServerDriver", - "benchmarks": [ - { - "timeout" : 600, - "count": 5, - "benchmark_builder": "JetStreamBenchmarkBuilder", - "original_benchmark": "../../../../PerformanceTests/JetStream", - "benchmark_patch": "data/patches/JetStream.patch", - "entry_point": "JetStream/JetStream-1.0.1/index.html", - "output_file": "jetstream.result" - } - ] + "timeout" : 600, + "count": 5, + "benchmark_builder": "JetStreamBenchmarkBuilder", + "original_benchmark": "../../../../PerformanceTests/JetStream", + "benchmark_patch": "data/patches/JetStream.patch", + "entry_point": "JetStream/JetStream-1.0.1/index.html", + "output_file": "jetstream.result" } ``` Plan is a json-formatted dictionary which contains following keys * **http_server_driver**: (**case-sensitive**) the http server module you want to host the resources. Current available option is "SimpleHTTPServerHandle" which is based on python twisted framework. -* **benchmarks**: contains a list of benchmarks you want to run, each benchmark is a list contains following keys: - * **timeout**: time limit for **EACH RUN** of the benchmark. This can avoid program getting stuck in the extreme circumstances. The time limit is suggested to be 1.5-2x the time spent in a normal run. - * **count**: the number of times you want to run benchmark - * **benchmark_builder**: builder of the benchmark which is responsible for arranging benchmark before the web server serving the directory. In most case, 'GenericBenchmarkHandler' is sufficient. It copies the benchmark to a temporary directory and applies patch to benchmark. 
If you have special requirement, you could design your own benchmark handle, just like the 'JetStreamBenchmarkHandle' in this example. - * **original_benchmark**: path of benchmark, a relative path to the root of this project ('benchmark_runner' directory) - * **benchmark_path**: (**OPTIONAL**) path of patch, a relative path to the root of this project ('benchmark_runner' directory) - * **entry_point**: the relative url you want browser to launch (a relative path to the webroot) - * **output_file**: specify the output file +* **timeout**: time limit for **EACH RUN** of the benchmark. This can avoid program getting stuck in the extreme circumstances. The time limit is suggested to be 1.5-2x the time spent in a normal run. +* **count**: the number of times you want to run benchmark +* **benchmark_builder**: builder of the benchmark which is responsible for arranging benchmark before the web server serving the directory. In most case, 'GenericBenchmarkHandler' is sufficient. It copies the benchmark to a temporary directory and applies patch to benchmark. If you have special requirement, you could design your own benchmark handle, just like the 'JetStreamBenchmarkHandle' in this example. +* **original_benchmark**: path of benchmark, a relative path to the root of this project ('benchmark_runner' directory) +* **benchmark_path**: (**OPTIONAL**) path of patch, a relative path to the root of this project ('benchmark_runner' directory) +* **entry_point**: the relative url you want browser to launch (a relative path to the webRoot) +* **output_file**: specify the output file, this can be overwritten by specifying '--output-file' while invoking run-benchmark script ### How to import a benchmark * Modify the benchmark html file, make sure the page has following functionalities: @@ -107,5 +102,4 @@ Plan is a json-formatted dictionary which contains following keys * Do following instruction **ONLY IF NEEDED**. In most case, you do not have to. 
* If you want to customize BrowserDriver for specific browser/platform, you need to extend browser_driver/browser_driver.py and register your module in browser_driver/browser_driversjson. * If you want to customize HTTPServerDriver, you need to extend http_server_drirver/http_server_driver and register your module in http_server_driver/http_server_drivers.json. - * If you want to customize ResultWrapper, you need to extend result_wrapper/base_result_wrapper.py and register your module in result_wrapper/result_wrappers.json * If you want to customize BenchmarkBuilder, you need to extend benchmark_builder/generic_benchmark_builder register you module in benchmark_builder/benchmark_builders.json diff --git a/Tools/Scripts/webkitpy/benchmark_runner/benchmark_builder/generic_benchmark_builder.py b/Tools/Scripts/webkitpy/benchmark_runner/benchmark_builder/generic_benchmark_builder.py index be5487999eca..6ac26f34aadd 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/benchmark_builder/generic_benchmark_builder.py +++ b/Tools/Scripts/webkitpy/benchmark_runner/benchmark_builder/generic_benchmark_builder.py @@ -6,7 +6,7 @@ import os import shutil import subprocess -from webkitpy.benchmark_runner.utils import getPathFromProjectRoot +from webkitpy.benchmark_runner.utils import getPathFromProjectRoot, forceRemove _log = logging.getLogger(__name__) @@ -40,4 +40,4 @@ class GenericBenchmarkBuilder(object): def clean(self): _log.info('Cleanning Benchmark') if self.webRoot: - shutil.rmtree(self.webRoot) + forceRemove(self.webRoot) diff --git a/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py b/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py index b4b9d19af0ab..a9728a37858a 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py +++ b/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py @@ -30,43 +30,45 @@ class BenchmarkRunner(object): self.plan = json.load(fp) self.browserDriver = BrowserDriverFactory.create([platform, browser]) 
self.httpServerDriver = HTTPServerDriverFactory.create([self.plan['http_server_driver']]) - self.benchmarks = self.plan['benchmarks'] self.buildDir = os.path.abspath(buildDir) self.outputFile = outputFile except IOError: _log.error('Can not open plan file: %s' % planFile) except ValueError: - _log.error('Plan file:%s may not follow json format' % planFile) + _log.error('Plan file:%s may not follow JSON format' % planFile) except: raise def execute(self): _log.info('Start to execute the plan') - for benchmark in self.benchmarks: - _log.info('Start a new benchmark') - results = [] - benchmarkBuilder = BenchmarkBuilderFactory.create([benchmark['benchmark_builder']]) - webRoot = benchmarkBuilder.prepare(benchmark['original_benchmark'], benchmark['benchmark_patch'] if 'benchmark_patch' in benchmark else None) - for x in xrange(int(benchmark['count'])): - _log.info('Start the iteration %d of current benchmark' % (x + 1)) - self.httpServerDriver.serve(webRoot) - self.browserDriver.prepareEnv() - self.browserDriver.launchUrl(urlparse.urljoin(self.httpServerDriver.baseUrl(), benchmark['entry_point']), self.buildDir) - try: - with timeout(benchmark['timeout']): - result = json.loads(self.httpServerDriver.fetchResult()) - assert(result) - results.append(result) - except: - _log.error('No result. Something went wrong. 
Will skip current benchmark.') - self.browserDriver.closeBrowsers() - break - finally: - self.browserDriver.closeBrowsers() - _log.info('End of %d iteration of current benchmark' % (x + 1)) - results = self.wrap(results) - self.dump(results, self.outputFile if self.outputFile else benchmark['output_file']) - benchmarkBuilder.clean() + _log.info('Start a new benchmark') + results = [] + benchmarkBuilder = BenchmarkBuilderFactory.create([self.plan['benchmark_builder']]) + webRoot = benchmarkBuilder.prepare(self.plan['original_benchmark'], self.plan['benchmark_patch'] if 'benchmark_patch' in self.plan else None) + for x in xrange(int(self.plan['count'])): + _log.info('Start the iteration %d of current benchmark' % (x + 1)) + self.httpServerDriver.serve(webRoot) + self.browserDriver.prepareEnv() + self.browserDriver.launchUrl(urlparse.urljoin(self.httpServerDriver.baseUrl(), self.plan['entry_point']), self.buildDir) + try: + with timeout(self.plan['timeout']): + result = json.loads(self.httpServerDriver.fetchResult()) + assert(not self.httpServerDriver.getReturnCode()) + assert(result) + results.append(result) + except: + _log.error('No result or server crashes. Something went wrong. 
Will skip current benchmark.') + self.browserDriver.closeBrowsers() + self.httpServerDriver.killServer() + benchmarkBuilder.clean() + return 1 + finally: + self.browserDriver.closeBrowsers() + _log.info('End of %d iteration of current benchmark' % (x + 1)) + results = self.wrap(results) + self.dump(results, self.outputFile if self.outputFile else self.plan['output_file']) + benchmarkBuilder.clean() + return 0 @classmethod def dump(cls, results, outputFile): diff --git a/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py b/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py index fbf491d2dc49..a63652b6f539 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py +++ b/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py @@ -8,6 +8,7 @@ import time # We assume that this handle can only be used when the platform is OSX. from AppKit import NSRunningApplication from browser_driver import BrowserDriver +from webkitpy.benchmark_runner.utils import forceRemove _log = logging.getLogger(__name__) @@ -17,6 +18,8 @@ class OSXSafariDriver(BrowserDriver): def prepareEnv(self): self.closeBrowsers() + forceRemove(os.path.join(os.path.expanduser('~'), 'Library/Saved Application State/com.apple.Safari.savedState')) + forceRemove(os.path.join(os.path.expanduser('~'), 'Library/Safari/LastSession.plist')) self.safariPreferences = ["-HomePage", "about:blank", "-WarnAboutFraudulentWebsites", "0", "-ExtensionsEnabled", "0", "-ShowStatusBar", "0", "-NewWindowBehavior", "1", "-NewTabBehavior", "1"] def launchUrl(self, url, browserBuildPath=None): diff --git a/Tools/Scripts/webkitpy/benchmark_runner/data/patches/JetStream.patch b/Tools/Scripts/webkitpy/benchmark_runner/data/patches/JetStream.patch index 7e0e57533add..04bdbe8c1773 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/data/patches/JetStream.patch +++ b/Tools/Scripts/webkitpy/benchmark_runner/data/patches/JetStream.patch @@ 
-10,7 +10,7 @@ index 73ee420..60f587c 100644 + function computeRefinedResults(){ + var results = {}; + for (var i = 0; i < benchmarks.length; ++i) { -+ results[benchmarks[i].name] = {"metrics" : {"Score" : {"current" : [benchmarks[i].times]}}}; ++ results[benchmarks[i].name] = {"metrics" : {"Score" : {"current" : [benchmarks[i].results]}}}; + } + return {"JetStream": {"metrics" : {"Score" : ["Geometric"]}, "tests" : results}}; + } diff --git a/Tools/Scripts/webkitpy/benchmark_runner/data/plans/jetstream.plan b/Tools/Scripts/webkitpy/benchmark_runner/data/plans/jetstream.plan index 9a898834d315..b6e7e17b74ab 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/data/plans/jetstream.plan +++ b/Tools/Scripts/webkitpy/benchmark_runner/data/plans/jetstream.plan @@ -1,14 +1,10 @@ { "http_server_driver": "SimpleHTTPServerDriver", - "benchmarks": [ - { - "timeout" : 600, - "count": 5, - "benchmark_builder": "JetStreamBenchmarkBuilder", - "original_benchmark": "../../../../PerformanceTests/JetStream", - "benchmark_patch": "data/patches/JetStream.patch", - "entry_point": "JetStream/JetStream-1.0.1/index.html", - "output_file": "jetstream.result" - } - ] + "timeout" : 600, + "count": 5, + "benchmark_builder": "JetStreamBenchmarkBuilder", + "original_benchmark": "../../../../PerformanceTests/JetStream", + "benchmark_patch": "data/patches/JetStream.patch", + "entry_point": "JetStream/JetStream-1.0.1/index.html", + "output_file": "jetstream.result" } diff --git a/Tools/Scripts/webkitpy/benchmark_runner/data/plans/speedometer.plan b/Tools/Scripts/webkitpy/benchmark_runner/data/plans/speedometer.plan index cff10a68680c..0214438fc695 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/data/plans/speedometer.plan +++ b/Tools/Scripts/webkitpy/benchmark_runner/data/plans/speedometer.plan @@ -1,14 +1,10 @@ { - "benchmarks": [ - { - "benchmark_builder": "GenericBenchmarkBuilder", - "benchmark_patch": "data/patches/Speedometer.patch", - "original_benchmark": 
"../../../../PerformanceTests/Speedometer", - "count": 5, - "entry_point": "Speedometer/Full.html", - "output_file": "speedometer.result", - "timeout": 600 - } - ], - "http_server_driver": "SimpleHTTPServerDriver" + "http_server_driver": "SimpleHTTPServerDriver", + "timeout": 300, + "count": 5, + "benchmark_builder": "GenericBenchmarkBuilder", + "original_benchmark": "../../../../PerformanceTests/Speedometer", + "benchmark_patch": "data/patches/Speedometer.patch", + "entry_point": "Speedometer/Full.html", + "output_file": "speedometer.result" } diff --git a/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server/twisted_http_server.py b/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server/twisted_http_server.py index 172fcf253812..953c04d8af6a 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server/twisted_http_server.py +++ b/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server/twisted_http_server.py @@ -16,6 +16,7 @@ class ServerControl(Resource): def render_POST(self, request): sys.stdout.write(request.content.getvalue()) + sys.stdout.flush() return 'OK' diff --git a/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py b/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py index 02094b82f21f..97eb436119d5 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py +++ b/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py @@ -11,3 +11,11 @@ class HTTPServerDriver(object): @abstractmethod def fetchResult(self): pass + + @abstractmethod + def killServer(self): + pass + + @abstractmethod + def getReturnCode(self): + pass diff --git a/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py b/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py index a4292a662308..b66b4f67b2bc 100644 --- 
a/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py +++ b/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py @@ -30,11 +30,11 @@ class SimpleHTTPServerDriver(HTTPServerDriver): _log.error('Cannot get the ip address of current machine') raise - def serve(self, webroot): + def serve(self, webRoot): oldWorkingDirectory = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(__file__))) - _log.info('Lauchning an http server') - self.serverProcess = subprocess.Popen(['/usr/bin/python', 'http_server/twisted_http_server.py', webroot], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + _log.info('Launching an http server') + self.serverProcess = subprocess.Popen(['/usr/bin/python', 'http_server/twisted_http_server.py', webRoot], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) os.chdir(oldWorkingDirectory) maxAttempt = 5 interval = 0.5 @@ -77,3 +77,12 @@ class SimpleHTTPServerDriver(HTTPServerDriver): def fetchResult(self): return self.serverProcess.communicate()[0] + + def killServer(self): + try: + self.serverProcess.terminate() + except OSError: + _log.info('Invalid pid, server may exit properly') + + def getReturnCode(self): + return self.serverProcess.returncode diff --git a/Tools/Scripts/webkitpy/benchmark_runner/utils.py b/Tools/Scripts/webkitpy/benchmark_runner/utils.py index 6bc4b68e951e..d8d055c56d3e 100644 --- a/Tools/Scripts/webkitpy/benchmark_runner/utils.py +++ b/Tools/Scripts/webkitpy/benchmark_runner/utils.py @@ -4,6 +4,7 @@ import json import logging import os import signal +import shutil _log = logging.getLogger(__name__) @@ -37,6 +38,13 @@ def loadJSONFromFile(filePath): raise Exception("Invalid json format or empty json was found in %s" % (filePath)) +def forceRemove(path): + try: + shutil.rmtree(path) + except: + # Directory/file does not exist or privilege issue, just ignore it + pass + # Borrow this code from # 
'http://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish' class TimeoutError(Exception): -- 2.36.0