run-webkit-tests: Upload test results (new results database)
author    jbedard@apple.com <jbedard@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Thu, 4 Apr 2019 00:43:05 +0000 (00:43 +0000)
committer jbedard@apple.com <jbedard@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Thu, 4 Apr 2019 00:43:05 +0000 (00:43 +0000)
https://bugs.webkit.org/show_bug.cgi?id=196577
<rdar://problem/34841155>

Reviewed by Lucas Forschler.

* Scripts/webkitpy/layout_tests/controllers/manager.py:
(Manager.run): Upload results after each device shard, since the configuration will be
different for each device.
(Manager._results_to_upload_json_trie): Convert a TestRunResults object to a trie to be
uploaded to a results database.
* Scripts/webkitpy/layout_tests/run_webkit_tests.py:
(parse_args): Add new 'Upload Options' and prepare 'Result JSON Options' for deprecation.

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@243842 268f45cc-cd09-0410-ab3c-d52691b4dbfc
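
For context on the new Manager._results_to_upload_json_trie: json_results_generator.add_path_to_trie keys each result by the '/'-separated components of its test name, so the uploaded payload is a nested dictionary with Upload.create_test_result values at the leaves. The following standalone sketch mimics that behavior; the helper mirrors the add_path_to_trie call in the diff below, while the test names and payload dictionaries are made up for illustration.

    # Standalone sketch (not part of the change) of how add_path_to_trie nests
    # results; the sample test names and payload dicts are hypothetical.
    def add_path_to_trie(path, value, trie):
        if '/' not in path:
            trie[path] = value
            return
        directory, _, rest = path.partition('/')
        trie.setdefault(directory, {})
        add_path_to_trie(rest, value, trie[directory])

    results_trie = {}
    add_path_to_trie('fast/css/a.html', {'time': 120}, results_trie)
    add_path_to_trie('fast/css/b.html', {'actual': 'TEXT', 'time': 85}, results_trie)
    # results_trie == {'fast': {'css': {'a.html': {'time': 120},
    #                                   'b.html': {'actual': 'TEXT', 'time': 85}}}}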

Tools/ChangeLog
Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
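
The expected/actual strings attached to each uploaded result are derived as sketched below: a test whose only expectation is PASS carries no 'expected' field, mapped failure types go through the FAILURE_TO_TEXT table, and anything unmapped falls back to the generic FAIL bucket. The string constants here are simplified stand-ins for the test_expectations.* and Upload.Expectations.* values used in the real code, and the sorting is only to make the example deterministic.

    # Simplified stand-ins for test_expectations.* / Upload.Expectations.*;
    # the real code maps enum-like constants rather than plain strings.
    FAILURE_TO_TEXT = {
        'PASS': 'PASS',
        'CRASH': 'CRASH',
        'TIMEOUT': 'TIMEOUT',
        'IMAGE': 'IMAGE',
        'TEXT': 'TEXT',
    }

    def expected_string(expected):
        # A test expected only to pass uploads no 'expected' field at all.
        if expected == {'PASS'}:
            return None
        # Unmapped expectation types fall back to the generic FAIL bucket.
        return ' '.join(FAILURE_TO_TEXT.get(e, 'FAIL') for e in sorted(expected))

    assert expected_string({'PASS'}) is None
    assert expected_string({'PASS', 'TEXT'}) == 'PASS TEXT'
    assert expected_string({'WONTFIX'}) == 'FAIL'  # hypothetical unmapped type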

diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index 8c63542..411a97a 100644
--- a/Tools/ChangeLog
+++ b/Tools/ChangeLog
@@ -1,3 +1,19 @@
+2019-04-03  Jonathan Bedard  <jbedard@apple.com>
+
+        run-webkit-tests: Upload test results (new results database)
+        https://bugs.webkit.org/show_bug.cgi?id=196577
+        <rdar://problem/34841155>
+
+        Reviewed by Lucas Forschler.
+
+        * Scripts/webkitpy/layout_tests/controllers/manager.py:
+        (Manager.run): Upload results after each device shard, since the configuration will be
+        different for each device.
+        (Manager._results_to_upload_json_trie): Convert a TestRunResults object to a trie to be
+        uploaded to a results database.
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+        (parse_args): Add new 'Upload Options' and prepare 'Result JSON Options' for deprecation.
+
 2019-04-03  Myles C. Maxfield  <mmaxfield@apple.com>
 
         -apple-trailing-word is needed for browser detection
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 0407290..57dcee6 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -55,6 +55,7 @@ from webkitpy.layout_tests.models import test_run_results
 from webkitpy.layout_tests.models.test_input import TestInput
 from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
 from webkitpy.tool.grammar import pluralize
+from webkitpy.results.upload import Upload
 from webkitpy.xcode.device_type import DeviceType
 
 _log = logging.getLogger(__name__)
@@ -174,6 +175,7 @@ class Manager(object):
         return True
 
     def run(self, args):
+        num_failed_uploads = 0
         total_tests = set()
         aggregate_test_names = set()
         aggregate_tests = set()
@@ -252,12 +254,38 @@ class Manager(object):
 
             _log.info('Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), ' for {}'.format(str(device_type)) if device_type else ''))
             _log.info('')
+            start_time_for_device = time.time()
             if not tests_to_run_by_device[device_type]:
                 continue
             if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
                 return test_run_results.RunDetails(exit_code=-1)
 
+            configuration = self._port.configuration_for_upload(self._port.target_host(0))
+            configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
             temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)
+
+            if self._options.report_urls:
+                self._printer.writeln('\n')
+                self._printer.write_update('Preparing upload data ...')
+
+                upload = Upload(
+                    suite='layout-tests',
+                    configuration=configuration,
+                    details=Upload.create_details(options=self._options),
+                    commits=self._port.commits_for_upload(),
+                    run_stats=Upload.create_run_stats(
+                        start_time=start_time_for_device,
+                        end_time=time.time(),
+                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
+                    ),
+                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
+                )
+                for url in self._options.report_urls:
+                    self._printer.write_update('Uploading to {} ...'.format(url))
+                    if not upload.upload(url, log_line_func=self._printer.writeln):
+                        num_failed_uploads += 1
+                self._printer.writeln('Uploads completed!')
+
             initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
             retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
             enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry
@@ -268,7 +296,10 @@ class Manager(object):
         self._runner.stop_servers()
 
         end_time = time.time()
-        return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
+        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
+        if num_failed_uploads:
+            result.exit_code = -1
+        return result
 
     def _run_test_subset(self, tests_to_run, tests_to_skip, device_type=None):
         try:
@@ -415,6 +446,42 @@ class Manager(object):
         perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
         self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))
 
+    def _results_to_upload_json_trie(self, expectations, results):
+        FAILURE_TO_TEXT = {
+            test_expectations.PASS: Upload.Expectations.PASS,
+            test_expectations.CRASH: Upload.Expectations.CRASH,
+            test_expectations.TIMEOUT: Upload.Expectations.TIMEOUT,
+            test_expectations.IMAGE: Upload.Expectations.IMAGE,
+            test_expectations.TEXT: Upload.Expectations.TEXT,
+            test_expectations.AUDIO: Upload.Expectations.AUDIO,
+            test_expectations.MISSING: Upload.Expectations.WARNING,
+            test_expectations.IMAGE_PLUS_TEXT: ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
+        }
+
+        results_trie = {}
+        for result in results.results_by_name.itervalues():
+            if result.type == test_expectations.SKIP:
+                continue
+
+            expected = expectations.filtered_expectations_for_test(
+                result.test_name,
+                self._options.pixel_tests or bool(result.reftest_type),
+                self._options.world_leaks,
+            )
+            if expected == {test_expectations.PASS}:
+                expected = None
+            else:
+                expected = ' '.join([FAILURE_TO_TEXT.get(e, Upload.Expectations.FAIL) for e in expected])
+
+            json_results_generator.add_path_to_trie(
+                result.test_name,
+                Upload.create_test_result(
+                    expected=expected,
+                    actual=FAILURE_TO_TEXT.get(result.type, Upload.Expectations.FAIL) if result.type else None,
+                    time=int(result.test_run_time * 1000),
+                ), results_trie)
+        return results_trie
+
     def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
         """Writes the results of the test run as JSON files into the results
         dir and upload the files to the appengine server.
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 4bf7f44..3d05a85 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -41,6 +41,7 @@ from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
 from webkitpy.port import configuration_options, platform_options
 from webkitpy.layout_tests.views import buildbot_results
 from webkitpy.layout_tests.views import printing
+from webkitpy.results.options import upload_options
 
 
 _log = logging.getLogger(__name__)
@@ -317,17 +318,13 @@ def parse_args(args):
         optparse.make_option("--wptserver-doc-root", type="string", help=("Set web platform server document root, relative to LayoutTests directory")),
     ]))
 
-    # FIXME: Move these into json_results_generator.py
-    option_group_definitions.append(("Result JSON Options", [
+    # FIXME: Remove this group once the old results dashboards are deprecated.
+    option_group_definitions.append(("Legacy Result Options", [
         optparse.make_option("--master-name", help="The name of the buildbot master."),
-        optparse.make_option("--builder-name", default="",
-            help=("The name of the builder shown on the waterfall running this script. e.g. Apple MountainLion Release WK2 (Tests).")),
         optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
             help=("The name of the builder used in its path, e.g. webkit-rel.")),
         optparse.make_option("--build-slave", default="DUMMY_BUILD_SLAVE",
             help=("The name of the buildslave used. e.g. apple-macpro-6.")),
-        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
-            help=("The build number of the builder running this script.")),
         optparse.make_option("--test-results-server", action="append", default=[],
             help=("If specified, upload results json files to this appengine server.")),
         optparse.make_option("--results-server-host", action="append", default=[],
@@ -340,6 +337,8 @@ def parse_args(args):
             help=("If specified, tests are allowed to make requests to the specified hostname."))
     ]))
 
+    option_group_definitions.append(('Upload Options', upload_options()))
+
     option_parser = optparse.OptionParser(usage="%prog [options] [<path>...]")
 
     for group_name, group_options in option_group_definitions:
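
For reference, the ('Upload Options', upload_options()) tuple appended above is registered by the same loop as every other group; the rest of that loop is not shown in this excerpt, so the sketch below assumes the usual optparse group-registration pattern, and '--example-upload-flag' is a hypothetical placeholder rather than an option actually returned by webkitpy.results.options.upload_options().

    # Minimal sketch, assuming the standard optparse group-registration pattern;
    # '--example-upload-flag' is a hypothetical placeholder option.
    import optparse

    def upload_options():
        return [optparse.make_option('--example-upload-flag', action='store_true',
                                     help='Hypothetical placeholder option.')]

    option_group_definitions = [('Upload Options', upload_options())]
    option_parser = optparse.OptionParser(usage='%prog [options] [<path>...]')
    for group_name, group_options in option_group_definitions:
        group = optparse.OptionGroup(option_parser, group_name)
        group.add_options(group_options)
        option_parser.add_option_group(group)
    options, args = option_parser.parse_args(['--example-upload-flag'])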