# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2012 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import fnmatch
import re

from datetime import datetime
from optparse import make_option

from webkitpy.tool import steps

from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.config.committers import CommitterList
import webkitpy.common.config.urls as config_urls
from webkitpy.common.net.buildbot import BuildBot
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.user import User
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
from webkitpy.common.system.deprecated_logging import log
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.port import platform_options, configuration_options


class SuggestReviewers(AbstractDeclarativeCommand):
    name = "suggest-reviewers"
    help_text = "Suggest reviewers for a patch based on recent changes to the modified files."

    def __init__(self):
        options = [
            steps.Options.git_commit,
        ]
        AbstractDeclarativeCommand.__init__(self, options=options)

    def execute(self, options, args, tool):
        reviewers = tool.checkout().suggested_reviewers(options.git_commit)
        print "\n".join([reviewer.full_name for reviewer in reviewers])


class BugsToCommit(AbstractDeclarativeCommand):
    name = "bugs-to-commit"
    help_text = "List bugs in the commit-queue"

    def execute(self, options, args, tool):
        # FIXME: This command is poorly named.  It's fetching the commit-queue list here.  The name implies it's fetching pending-commit (all r+'d patches).
        bug_ids = tool.bugs.queries.fetch_bug_ids_from_commit_queue()
        for bug_id in bug_ids:
            print "%s" % bug_id


class PatchesInCommitQueue(AbstractDeclarativeCommand):
    name = "patches-in-commit-queue"
    help_text = "List patches in the commit-queue"

    def execute(self, options, args, tool):
        patches = tool.bugs.queries.fetch_patches_from_commit_queue()
        log("Patches in commit queue:")
        for patch in patches:
            print patch.url()


class PatchesToCommitQueue(AbstractDeclarativeCommand):
    name = "patches-to-commit-queue"
    help_text = "Patches which should be added to the commit queue"
    def __init__(self):
        options = [
            make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"),
        ]
        AbstractDeclarativeCommand.__init__(self, options=options)

    @staticmethod
    def _needs_commit_queue(patch):
        if patch.commit_queue() == "+": # If it's already cq+, ignore the patch.
            log("%s already has cq=%s" % (patch.id(), patch.commit_queue()))
            return False

        # We only need to worry about patches from contributors who are not yet committers.
        committer_record = CommitterList().committer_by_email(patch.attacher_email())
        if committer_record:
            log("%s committer = %s" % (patch.id(), committer_record))
        return not committer_record

    def execute(self, options, args, tool):
        patches = tool.bugs.queries.fetch_patches_from_pending_commit_list()
        patches_needing_cq = filter(self._needs_commit_queue, patches)
        if options.bugs:
            bugs_needing_cq = map(lambda patch: patch.bug_id(), patches_needing_cq)
            bugs_needing_cq = sorted(set(bugs_needing_cq))
            for bug_id in bugs_needing_cq:
                print "%s" % tool.bugs.bug_url_for_bug_id(bug_id)
        else:
            for patch in patches_needing_cq:
                print "%s" % tool.bugs.attachment_url_for_id(patch.id(), action="edit")


class PatchesToReview(AbstractDeclarativeCommand):
    name = "patches-to-review"
    help_text = "List bugs which have attachments pending review"

    def __init__(self):
        options = [
            make_option("--all", action="store_true",
                        help="Show all bugs regardless of who is on CC (it might take a while)"),
            make_option("--include-cq-denied", action="store_true",
                        help="By default, r? patches with cq- are omitted unless this option is set"),
            make_option("--cc-email",
                        help="Specifies the email on the CC field (defaults to your bugzilla login email)"),
        ]
        AbstractDeclarativeCommand.__init__(self, options=options)

    def _print_report(self, report, cc_email, print_all):
        if print_all:
            print "Bugs with attachments pending review:"
        else:
            print "Bugs with attachments pending review that have %s in the CC list:" % cc_email

        print "http://webkit.org/b/bugid   Description (age in days)"
        for row in report:
            print "%s (%d)" % (row[1], row[0])

        print "Total: %d" % len(report)

    def _generate_report(self, bugs, include_cq_denied):
        report = []

        for bug in bugs:
            patch = bug.unreviewed_patches()[-1]

            if not include_cq_denied and patch.commit_queue() == "-":
                continue

            age_in_days = (datetime.today() - patch.attach_date()).days
            report.append((age_in_days, "http://webkit.org/b/%-7s %s" % (bug.id(), bug.title())))

        report.sort()
        return report

    def execute(self, options, args, tool):
        tool.bugs.authenticate()

        cc_email = options.cc_email
        if not cc_email and not options.all:
            cc_email = tool.bugs.username

        bugs = tool.bugs.queries.fetch_bugs_from_review_queue(cc_email=cc_email)
        report = self._generate_report(bugs, options.include_cq_denied)
        self._print_report(report, cc_email, options.all)

class WhatBroke(AbstractDeclarativeCommand):
    name = "what-broke"
    help_text = "Print failing buildbots (%s) and what revisions broke them" % config_urls.buildbot_url

    def _print_builder_line(self, builder_name, max_name_width, status_message):
        print "%s : %s" % (builder_name.ljust(max_name_width), status_message)

    def _print_blame_information_for_builder(self, builder_status, name_width, avoid_flakey_tests=True):
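        # Rough outline of the logic below (inferred from this method's own calls):
        # builder.find_regression_window() is expected to bracket the failure between the last
        # build that loaded cleanly before the failure and the first failing build; every revision
        # landed inside that window is then treated as a blame candidate and its CommitInfo printed.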
        builder = self._tool.buildbot.builder_with_name(builder_status["name"])
        red_build = builder.build(builder_status["build_number"])
        regression_window = builder.find_regression_window(red_build)
        if not regression_window.failing_build():
            self._print_builder_line(builder.name(), name_width, "FAIL (error loading build information)")
            return
        if not regression_window.build_before_failure():
            self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: sometime before %s?)" % regression_window.failing_build().revision())
            return

        revisions = regression_window.revisions()
        first_failure_message = ""
        if (regression_window.failing_build() == builder.build(builder_status["build_number"])):
            first_failure_message = " FIRST FAILURE, possibly a flaky test"
        self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: %s%s)" % (revisions, first_failure_message))
        for revision in revisions:
            commit_info = self._tool.checkout().commit_info_for_revision(revision)
            if commit_info:
                print commit_info.blame_string(self._tool.bugs)
            else:
                print "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision

    def execute(self, options, args, tool):
        builder_statuses = tool.buildbot.builder_statuses()
        longest_builder_name = max(map(len, map(lambda builder: builder["name"], builder_statuses)))
        failing_builders = 0
        for builder_status in builder_statuses:
            # Skip builders that are green; only failing builders are reported below.
            if builder_status["is_green"]:
                continue
            self._print_blame_information_for_builder(builder_status, name_width=longest_builder_name)
            failing_builders += 1
        if failing_builders:
            print "%s of %s are failing" % (failing_builders, pluralize("builder", len(builder_statuses)))
        else:
            print "All builders are passing!"


class ResultsFor(AbstractDeclarativeCommand):
    name = "results-for"
    help_text = "Print a list of failures for the passed revision from bots on %s" % config_urls.buildbot_url
    argument_names = "REVISION"

    def _print_layout_test_results(self, results):
        if not results:
            print " No results."
            return
        for title, files in results.parsed_results().items():
            print " %s" % title
            for filename in files:
                print "  %s" % filename

    def execute(self, options, args, tool):
        builders = self._tool.buildbot.builders()
        for builder in builders:
            print "%s:" % builder.name()
            build = builder.build_for_revision(args[0], allow_failed_lookups=True)
            self._print_layout_test_results(build.layout_test_results())


class FailureReason(AbstractDeclarativeCommand):
    name = "failure-reason"
    help_text = "Lists revisions where individual test failures started at %s" % config_urls.buildbot_url

    def _blame_line_for_revision(self, revision):
        try:
            commit_info = self._tool.checkout().commit_info_for_revision(revision)
        except Exception, e:
            return "FAILED to fetch CommitInfo for r%s, exception: %s" % (revision, e)
        if not commit_info:
            return "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision
        return commit_info.blame_string(self._tool.bugs)

    def _print_blame_information_for_transition(self, regression_window, failing_tests):
        red_build = regression_window.failing_build()
        print "SUCCESS: Build %s (r%s) was the first to show failures: %s" % (red_build._number, red_build.revision(), failing_tests)
        print "Suspect revisions:"
        for revision in regression_window.revisions():
            print self._blame_line_for_revision(revision)

    def _explain_failures_for_builder(self, builder, start_revision):
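        # Outline of the walk below: start from the failing build at start_revision and step back
        # one revision at a time.  Once a failing test passes in an older build, the span between
        # that build and the most recent newer build that had results is reported as the regression
        # window for that test, and the revisions inside it as the suspects.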
        print "Examining failures for \"%s\", starting at r%s" % (builder.name(), start_revision)
        revision_to_test = start_revision
        build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
        layout_test_results = build.layout_test_results()
        if not layout_test_results:
            # FIXME: This could be made more user friendly.
            print "Failed to load layout test results from %s; can't continue. (start revision = r%s)" % (build.results_url(), start_revision)
            return 1

        results_to_explain = set(layout_test_results.failing_tests())
        last_build_with_results = build
        print "Starting at %s" % revision_to_test
        while results_to_explain:
            revision_to_test -= 1
            new_build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
            if not new_build:
                print "No build for %s" % revision_to_test
                continue
            build = new_build
            latest_results = build.layout_test_results()
            if not latest_results:
                print "No results for build %s (r%s)" % (build._number, build.revision())
                continue
            failures = set(latest_results.failing_tests())
            if len(failures) >= 20:
                # FIXME: We may need to move this logic into the LayoutTestResults class.
                # The buildbot stops runs after 20 failures so we don't have full results to work with here.
                print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
                continue
            fixed_results = results_to_explain - failures
            if not fixed_results:
                print "No change in build %s (r%s), %s unexplained failures (%s in this build)" % (build._number, build.revision(), len(results_to_explain), len(failures))
                last_build_with_results = build
                continue
            regression_window = RegressionWindow(build, last_build_with_results)
            self._print_blame_information_for_transition(regression_window, fixed_results)
            last_build_with_results = build
            results_to_explain -= fixed_results
        if results_to_explain:
            print "Failed to explain failures: %s" % results_to_explain
            return 1
        print "Explained all results for %s" % builder.name()
        return 0

    def _builder_to_explain(self):
        builder_statuses = self._tool.buildbot.builder_statuses()
        red_statuses = [status for status in builder_statuses if not status["is_green"]]
        print "%s failing" % (pluralize("builder", len(red_statuses)))
        builder_choices = [status["name"] for status in red_statuses]
        # We could offer an "All" choice here.
        chosen_name = self._tool.user.prompt_with_list("Which builder to diagnose:", builder_choices)
        # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
        for status in red_statuses:
            if status["name"] == chosen_name:
                return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])

    def execute(self, options, args, tool):
        (builder, latest_revision) = self._builder_to_explain()
        start_revision = self._tool.user.prompt("Revision to walk backwards from? [%s] " % latest_revision) or latest_revision
        if not start_revision:
            print "Revision required."
            return 1
        return self._explain_failures_for_builder(builder, start_revision=int(start_revision))


class FindFlakyTests(AbstractDeclarativeCommand):
    name = "find-flaky-tests"
    help_text = "Lists tests that often fail for a single build at %s" % config_urls.buildbot_url

    def _find_failures(self, builder, revision):
        build = builder.build_for_revision(revision, allow_failed_lookups=True)
        if not build:
            print "No build for %s" % revision
            return (None, None)
        results = build.layout_test_results()
        if not results:
            print "No results for build %s (r%s)" % (build._number, build.revision())
            return (None, None)
        failures = set(results.failing_tests())
        if len(failures) >= 20:
            # FIXME: We may need to move this logic into the LayoutTestResults class.
            # The buildbot stops runs after 20 failures so we don't have full results to work with here.
            print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
            return (None, None)
        return (build, failures)

    def _increment_statistics(self, flaky_tests, flaky_test_statistics):
        for test in flaky_tests:
            count = flaky_test_statistics.get(test, 0)
            flaky_test_statistics[test] = count + 1

    def _print_statistics(self, statistics):
        print "=== Results ==="
        print "Occurrences Test name"
        for value, key in sorted([(value, key) for key, value in statistics.items()]):
            print "%10d %s" % (value, key)

    def _walk_backwards_from(self, builder, start_revision, limit):
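        # Heuristic used below: walking from start_revision backwards, a test is counted as flaky
        # when it failed in exactly one build and passed in the builds on either side of it.
        # one_time_previous_failures holds the failures unique to the previously examined (newer)
        # build; anything in that set that no longer fails here is treated as a flake.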
        flaky_test_statistics = {}
        all_previous_failures = set([])
        one_time_previous_failures = set([])
        previous_build = None
        for i in range(limit):
            revision = start_revision - i
            print "Analyzing %s ... " % revision,
            (build, failures) = self._find_failures(builder, revision)
            if failures is None:
                # Notice that we don't loop on the empty set!
                continue
            print "has %s failures" % len(failures)
            flaky_tests = one_time_previous_failures - failures
            if flaky_tests:
                print "Flaky tests: %s %s" % (sorted(flaky_tests),
                                              previous_build.results_url())
            self._increment_statistics(flaky_tests, flaky_test_statistics)
            one_time_previous_failures = failures - all_previous_failures
            all_previous_failures = failures
            previous_build = build
        self._print_statistics(flaky_test_statistics)

    def _builder_to_analyze(self):
        statuses = self._tool.buildbot.builder_statuses()
        choices = [status["name"] for status in statuses]
        chosen_name = self._tool.user.prompt_with_list("Which builder to analyze:", choices)
        for status in statuses:
            if status["name"] == chosen_name:
                return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])

    def execute(self, options, args, tool):
        (builder, latest_revision) = self._builder_to_analyze()
        limit = self._tool.user.prompt("How many revisions to look through? [10000] ") or 10000
        return self._walk_backwards_from(builder, latest_revision, limit=int(limit))


class TreeStatus(AbstractDeclarativeCommand):
    name = "tree-status"
    help_text = "Print the status of the %s buildbots" % config_urls.buildbot_url
    long_help = """Fetches build status from http://build.webkit.org/one_box_per_builder
and displays the status of each builder."""

    def execute(self, options, args, tool):
        for builder in tool.buildbot.builder_statuses():
            status_string = "ok" if builder["is_green"] else "FAIL"
            print "%s : %s" % (status_string.ljust(4), builder["name"])


class CrashLog(AbstractDeclarativeCommand):
    name = "crash-log"
    help_text = "Print the newest crash log for the given process"
    long_help = """Finds the newest crash log matching the given process name
and PID and prints it to stdout."""
    argument_names = "PROCESS_NAME [PID]"

    def execute(self, options, args, tool):
        crash_logs = CrashLogs(tool)
        pid = None
        if len(args) > 1:
            pid = int(args[1])
        print crash_logs.find_newest_log(args[0], pid)


class PrintExpectations(AbstractDeclarativeCommand):
    name = 'print-expectations'
    help_text = 'Print the expected result for the given test(s) on the given port(s)'
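    # Illustrative invocation (assumptions: these commands are exposed through the webkit-patch
    # tool and platform_options(use_globs=True) provides a --platform flag; neither is defined in
    # this file):
    #   webkit-patch print-expectations --platform='mac*' fast/forms
    # would print the expected results for fast/forms on every port whose name matches the glob.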

    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the expectations for *all* tests'),
            make_option('-x', '--exclude-keyword', action='append', default=[],
                        help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"); may be specified multiple times'),
            make_option('-i', '--include-keyword', action='append', default=[],
                        help='limit to tests with the given keyword (for example, "skip", "slow", or "crash"); may be specified multiple times'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, modifiers, tests, and expectations'),
            make_option('-f', '--full', action='store_true', default=False,
                        help='Print a full TestExpectations-style line for every match'),
        ] + platform_options(use_globs=True)

        AbstractDeclarativeCommand.__init__(self, options=options)
        self._expectation_models = {}

    def execute(self, options, args, tool):
        if not args and not options.all:
            print "You must either specify one or more test paths or --all."
            return

        if options.platform:
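            # With use_globs=True the platform value may be an fnmatch-style glob (e.g. 'mac*');
            # it is expanded against every known port name.  If nothing matches, fall back to
            # treating the value as a single port name before giving up.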
            port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
            if not port_names:
                default_port = tool.port_factory.get(options.platform)
                if default_port:
                    port_names = [default_port.name()]
                else:
                    print "No port names match '%s'" % options.platform
                    return
            else:
                default_port = tool.port_factory.get(port_names[0])
        else:
            default_port = tool.port_factory.get(options=options)
            port_names = [default_port.name()]

        tests = default_port.tests(args)
        for port_name in port_names:
            model = self._model(options, port_name, tests)
            tests_to_print = self._filter_tests(options, model, tests)
            lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
            if port_name != port_names[0]:
                print
            print '\n'.join(self._format_lines(options, port_name, lines))

    def _filter_tests(self, options, model, tests):
        filtered_tests = set()
        if options.include_keyword:
            for keyword in options.include_keyword:
                filtered_tests.update(model.get_test_set_for_keyword(keyword))
        else:
            filtered_tests = tests

        for keyword in options.exclude_keyword:
            filtered_tests.difference_update(model.get_test_set_for_keyword(keyword))
        return filtered_tests

    def _format_lines(self, options, port_name, lines):
        output = []
        if options.csv:
            for line in lines:
                output.append("%s,%s" % (port_name, line.to_csv()))
        elif lines:
            include_modifiers = options.full
            include_expectations = options.full or len(options.include_keyword) != 1 or len(options.exclude_keyword)
            output.append("// For %s" % port_name)
            for line in lines:
                output.append("%s" % line.to_string(None, include_modifiers, include_expectations, include_comment=False))
        return output

    def _model(self, options, port_name, tests):
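        # Models are cached per expectations file rather than per port, so ports that share a
        # TestExpectations file are only parsed once.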
        port = self._tool.port_factory.get(port_name, options)
        expectations_path = port.path_to_test_expectations_file()
        if expectations_path not in self._expectation_models:
            self._expectation_models[expectations_path] = TestExpectations(port, tests).model()
        return self._expectation_models[expectations_path]


class PrintBaselines(AbstractDeclarativeCommand):
    name = 'print-baselines'
    help_text = 'Prints the baseline locations for given test(s) on the given port(s)'

    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the baselines for *all* tests'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, test name, test platform, baseline type, baseline location, and baseline platform'),
            make_option('--include-virtual-tests', action='store_true',
                        help='Include virtual tests'),
        ] + platform_options(use_globs=True)
        AbstractDeclarativeCommand.__init__(self, options=options)
        self._platform_regexp = re.compile(r'platform/([^/]+)/(.+)')

    def execute(self, options, args, tool):
        if not args and not options.all:
            print "You must either specify one or more test paths or --all."
            return

        default_port = tool.port_factory.get()
        if options.platform:
            port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
            if not port_names:
                print "No port names match '%s'" % options.platform
        else:
            port_names = [default_port.name()]

        if options.include_virtual_tests:
            tests = sorted(default_port.tests(args))
        else:
            # FIXME: make real_tests() a public method.
            tests = sorted(default_port._real_tests(args))

        for port_name in port_names:
            if port_name != port_names[0]:
                print
            if not options.csv:
                print "// For %s" % port_name
            port = tool.port_factory.get(port_name)
            for test_name in tests:
                self._print_baselines(options, port_name, test_name, port.expected_baselines_by_extension(test_name))

    def _print_baselines(self, options, port_name, test_name, baselines):
        for extension in sorted(baselines.keys()):
            baseline_location = baselines[extension]
            if baseline_location:
                if options.csv:
                    print "%s,%s,%s,%s,%s,%s" % (port_name, test_name, self._platform_for_path(test_name),
                                                 extension[1:], baseline_location, self._platform_for_path(baseline_location))
                else:
                    print baseline_location

    def _platform_for_path(self, relpath):
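        # Paths under a platform/<name>/ directory are reported as that platform (for example,
        # 'platform/mac/foo-expected.txt' -> 'mac'); generic baselines return None.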
        platform_matchobj = self._platform_regexp.match(relpath)
        if platform_matchobj:
            return platform_matchobj.group(1)
        return None