# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2012 Intel Corporation. All rights reserved.
# Copyright (c) 2013 University of Szeged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import fnmatch
import logging
import re

from datetime import datetime
from optparse import make_option

from webkitpy.tool import steps

from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.config.committers import CommitterList
import webkitpy.common.config.urls as config_urls
from webkitpy.common.net.buildbot import BuildBot
from webkitpy.common.net.bugzilla import Bugzilla
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.user import User
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import Command
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.port import platform_options, configuration_options

_log = logging.getLogger(__name__)


class SuggestReviewers(AbstractSequencedCommand):
    name = "suggest-reviewers"
    help_text = "Suggest reviewers for a patch based on recent changes to the modified files."
    steps = [
        steps.SuggestReviewers,
    ]

    def _prepare_state(self, options, args, tool):
        options.suggest_reviewers = True


class BugsToCommit(Command):
    name = "bugs-to-commit"
    help_text = "List bugs in the commit-queue"

    def execute(self, options, args, tool):
        # FIXME: This command is poorly named.  It's fetching the commit-queue list here.  The name implies it's fetching pending-commit (all r+'d patches).
        bug_ids = tool.bugs.queries.fetch_bug_ids_from_commit_queue()
        for bug_id in bug_ids:
            print "%s" % bug_id


class PatchesInCommitQueue(Command):
    name = "patches-in-commit-queue"
    help_text = "List patches in the commit-queue"

    def execute(self, options, args, tool):
        patches = tool.bugs.queries.fetch_patches_from_commit_queue()
        _log.info("Patches in commit queue:")
        for patch in patches:
            print patch.url()


class PatchesToCommitQueue(Command):
    name = "patches-to-commit-queue"
    help_text = "Patches which should be added to the commit queue"

    def __init__(self):
        options = [
            make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"),
        ]
        Command.__init__(self, options=options)

    @staticmethod
    def _needs_commit_queue(patch):
        if patch.commit_queue() == "+": # If it's already cq+, ignore the patch.
            _log.info("%s already has cq=%s" % (patch.id(), patch.commit_queue()))
            return False

        # We only need to worry about patches from contributors who are not yet committers.
        committer_record = CommitterList().committer_by_email(patch.attacher_email())
        if committer_record:
            _log.info("%s committer = %s" % (patch.id(), committer_record))
        return not committer_record

    def execute(self, options, args, tool):
        patches = tool.bugs.queries.fetch_patches_from_pending_commit_list()
        patches_needing_cq = filter(self._needs_commit_queue, patches)
        if options.bugs:
            bugs_needing_cq = map(lambda patch: patch.bug_id(), patches_needing_cq)
            bugs_needing_cq = sorted(set(bugs_needing_cq))
            for bug_id in bugs_needing_cq:
                print "%s" % tool.bugs.bug_url_for_bug_id(bug_id)
        else:
            for patch in patches_needing_cq:
                print "%s" % tool.bugs.attachment_url_for_id(patch.id(), action="edit")


class PatchesToReview(Command):
    name = "patches-to-review"
    help_text = "List bugs which have attachments pending review"

    def __init__(self):
        options = [
            make_option("--all", action="store_true",
                        help="Show all bugs regardless of who is on CC (it might take a while)"),
            make_option("--include-cq-denied", action="store_true",
                        help="By default, r? patches with cq- are omitted unless this option is set"),
            make_option("--cc-email",
                        help="Specifies the email on the CC field (defaults to your bugzilla login email)"),
        ]
        Command.__init__(self, options=options)

    def _print_report(self, report, cc_email, print_all):
        if print_all:
            print "Bugs with attachments pending review:"
        else:
            print "Bugs with attachments pending review that have %s in the CC list:" % cc_email

        print "http://webkit.org/b/bugid   Description (age in days)"
        for row in report:
            print "%s (%d)" % (row[1], row[0])

        print "Total: %d" % len(report)

    def _generate_report(self, bugs, include_cq_denied):
        report = []

        for bug in bugs:
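            # Each bug is reported once, based on its most recent unreviewed patch.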
            patch = bug.unreviewed_patches()[-1]

            if not include_cq_denied and patch.commit_queue() == "-":
                continue

            age_in_days = (datetime.today() - patch.attach_date()).days
            report.append((age_in_days, "http://webkit.org/b/%-7s %s" % (bug.id(), bug.title())))

        report.sort()
        return report

    def execute(self, options, args, tool):
        tool.bugs.authenticate()

        cc_email = options.cc_email
        if not cc_email and not options.all:
            cc_email = tool.bugs.username

        bugs = tool.bugs.queries.fetch_bugs_from_review_queue(cc_email=cc_email)
        report = self._generate_report(bugs, options.include_cq_denied)
        self._print_report(report, cc_email, options.all)


class WhatBroke(Command):
    name = "what-broke"
    help_text = "Print failing buildbots (%s) and what revisions broke them" % config_urls.buildbot_url

    def _print_builder_line(self, builder_name, max_name_width, status_message):
        print "%s : %s" % (builder_name.ljust(max_name_width), status_message)

    def _print_blame_information_for_builder(self, builder_status, name_width, avoid_flakey_tests=True):
        builder = self._tool.buildbot.builder_with_name(builder_status["name"])
        red_build = builder.build(builder_status["build_number"])
        regression_window = builder.find_regression_window(red_build)
        if not regression_window.failing_build():
            self._print_builder_line(builder.name(), name_width, "FAIL (error loading build information)")
            return
        if not regression_window.build_before_failure():
            self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: sometime before %s?)" % regression_window.failing_build().revision())
            return

        revisions = regression_window.revisions()
        first_failure_message = ""
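        # If the red build is the very first build to fail, a flaky test may be
        # to blame rather than a real regression.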
        if regression_window.failing_build() == red_build:
            first_failure_message = " FIRST FAILURE, possibly a flaky test"
        self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: %s%s)" % (revisions, first_failure_message))
        for revision in revisions:
            commit_info = self._tool.checkout().commit_info_for_revision(revision)
            if commit_info:
                print commit_info.blame_string(self._tool.bugs)
            else:
                print "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision

    def execute(self, options, args, tool):
        builder_statuses = tool.buildbot.builder_statuses()
        longest_builder_name = max(map(len, map(lambda builder: builder["name"], builder_statuses)))
        failing_builders = 0
        for builder_status in builder_statuses:
            # Skip builders that are currently green.
            if builder_status["is_green"]:
                continue
            self._print_blame_information_for_builder(builder_status, name_width=longest_builder_name)
            failing_builders += 1
        if failing_builders:
            print "%s of %s are failing" % (failing_builders, pluralize(len(builder_statuses), "builder"))
        else:
            print "All builders are passing!"


class ResultsFor(Command):
    name = "results-for"
    help_text = "Print a list of failures for the passed revision from bots on %s" % config_urls.buildbot_url
    argument_names = "REVISION"

    def _print_layout_test_results(self, results):
        if not results:
            print " No results."
            return
        for title, files in results.parsed_results().items():
            print " %s" % title
            for filename in files:
                print "  %s" % filename

    def execute(self, options, args, tool):
        builders = self._tool.buildbot.builders()
        for builder in builders:
            print "%s:" % builder.name()
            build = builder.build_for_revision(args[0], allow_failed_lookups=True)
            self._print_layout_test_results(build.layout_test_results())


class FailureReason(Command):
    name = "failure-reason"
    help_text = "Lists revisions where individual test failures started at %s" % config_urls.buildbot_url
    argument_names = "[LAYOUT_TESTS]"

    def _blame_line_for_revision(self, revision):
        try:
            commit_info = self._tool.checkout().commit_info_for_revision(revision)
        except Exception, e:
            return "FAILED to fetch CommitInfo for r%s, exception: %s" % (revision, e)
        if not commit_info:
            return "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision
        return commit_info.blame_string(self._tool.bugs)

    def _print_blame_information_for_transition(self, regression_window, failing_tests):
        red_build = regression_window.failing_build()
        print "SUCCESS: Build %s (r%s) was the first to show failures: %s" % (red_build._number, red_build.revision(), failing_tests)
        print "Suspect revisions:"
        for revision in regression_window.revisions():
            print self._blame_line_for_revision(revision)

    def _explain_failures_for_builder(self, builder, start_revision):
        print "Examining failures for \"%s\", starting at r%s" % (builder.name(), start_revision)
        revision_to_test = start_revision
        build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
        layout_test_results = build.layout_test_results()
        if not layout_test_results:
            # FIXME: This could be made more user friendly.
            print "Failed to load layout test results from %s; can't continue. (start revision = r%s)" % (build.results_url(), start_revision)
            return 1

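        # Walk backwards one revision at a time. Whenever a test that is still
        # failing at the newest build stops failing in an older build, blame the
        # regression window between that build and the last build with results.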
        results_to_explain = set(layout_test_results.failing_tests())
        last_build_with_results = build
        print "Starting at %s" % revision_to_test
        while results_to_explain and not self._done_explaining():
            revision_to_test -= 1
            new_build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
            if not new_build:
                print "No build for %s" % revision_to_test
                continue
            build = new_build
            latest_results = build.layout_test_results()
            if not latest_results:
                print "No results for build %s (r%s)" % (build._number, build.revision())
                continue
            failures = set(latest_results.failing_tests())
            if len(failures) >= 500:
                # FIXME: We may need to move this logic into the LayoutTestResults class.
                # The buildbot stops runs after 500 failures so we don't have full results to work with here.
                print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
                continue
            fixed_results = results_to_explain - failures
            if not fixed_results:
                print "No change in build %s (r%s), %s unexplained failures (%s in this build)" % (build._number, build.revision(), len(results_to_explain), len(failures))
                last_build_with_results = build
                continue
            self.explained_failures.update(fixed_results)
            regression_window = RegressionWindow(build, last_build_with_results)
            self._print_blame_information_for_transition(regression_window, fixed_results)
            last_build_with_results = build
            results_to_explain -= fixed_results
        if results_to_explain:
            print "Failed to explain failures: %s" % results_to_explain
            return 1
        print "Explained all results for %s" % builder.name()
        return 0

    def _builder_to_explain(self):
        builder_statuses = self._tool.buildbot.builder_statuses()
        red_statuses = [status for status in builder_statuses if not status["is_green"]]
        print "%s failing" % (pluralize(len(red_statuses), "builder"))
        builder_choices = [status["name"] for status in red_statuses]
        # We could offer an "All" choice here.
        chosen_name = self._tool.user.prompt_with_list("Which builder to diagnose:", builder_choices)
        # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
        for status in red_statuses:
            if status["name"] == chosen_name:
                return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])

    def _done_explaining(self):
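        # With no explicit test list on the command line, never report "done"
        # early; the walk continues until every current failure is explained.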
        if not self.failures_to_explain:
            return False

        return self.explained_failures.issuperset(self.failures_to_explain)

    def execute(self, options, args, tool):
        (builder, latest_revision) = self._builder_to_explain()
        start_revision = self._tool.user.prompt("Revision to walk backwards from? [%s] " % latest_revision) or latest_revision
        self.failures_to_explain = args
        self.explained_failures = set()
        if not start_revision:
            print "Revision required."
            return 1
        return self._explain_failures_for_builder(builder, start_revision=int(start_revision))


class FindFlakyTests(Command):
    name = "find-flaky-tests"
    help_text = "Lists tests that often fail for a single build at %s" % config_urls.buildbot_url

    def _find_failures(self, builder, revision):
        build = builder.build_for_revision(revision, allow_failed_lookups=True)
        if not build:
            print "No build for %s" % revision
            return (None, None)
        results = build.layout_test_results()
        if not results:
            print "No results for build %s (r%s)" % (build._number, build.revision())
            return (None, None)
        failures = set(results.failing_tests())
        if len(failures) >= 20:
            # FIXME: We may need to move this logic into the LayoutTestResults class.
            # The buildbot stops runs after 20 failures so we don't have full results to work with here.
            print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
            return (None, None)
        return (build, failures)

    def _increment_statistics(self, flaky_tests, flaky_test_statistics):
        for test in flaky_tests:
            count = flaky_test_statistics.get(test, 0)
            flaky_test_statistics[test] = count + 1

    def _print_statistics(self, statistics):
        print "=== Results ==="
        print "Occurrences Test name"
        for value, key in sorted([(value, key) for key, value in statistics.items()]):
            print "%10d %s" % (value, key)

    def _walk_backwards_from(self, builder, start_revision, limit):
        flaky_test_statistics = {}
        all_previous_failures = set()
        one_time_previous_failures = set()
        previous_build = None
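        # A test is flagged as flaky when it failed in one build but passed in
        # the builds on either side of it; the walk goes from newest to oldest,
        # so "previous" builds here are newer than the current one.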
        for i in range(limit):
            revision = start_revision - i
            print "Analyzing %s ... " % revision,
            (build, failures) = self._find_failures(builder, revision)
            if failures is None:
                # An empty set is a valid result (no failures); None means the lookup itself failed.
                continue
            print "has %s failures" % len(failures)
            flaky_tests = one_time_previous_failures - failures
            if flaky_tests:
                print "Flaky tests: %s %s" % (sorted(flaky_tests),
                                              previous_build.results_url())
            self._increment_statistics(flaky_tests, flaky_test_statistics)
            one_time_previous_failures = failures - all_previous_failures
            all_previous_failures = failures
            previous_build = build
        self._print_statistics(flaky_test_statistics)

    def _builder_to_analyze(self):
        statuses = self._tool.buildbot.builder_statuses()
        choices = [status["name"] for status in statuses]
        chosen_name = self._tool.user.prompt_with_list("Which builder to analyze:", choices)
        for status in statuses:
            if status["name"] == chosen_name:
                return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])

    def execute(self, options, args, tool):
        (builder, latest_revision) = self._builder_to_analyze()
        limit = self._tool.user.prompt("How many revisions to look through? [10000] ") or 10000
        return self._walk_backwards_from(builder, latest_revision, limit=int(limit))


class TreeStatus(Command):
    name = "tree-status"
    help_text = "Print the status of the %s buildbots" % config_urls.buildbot_url
    long_help = """Fetches build status from https://build.webkit.org/one_box_per_builder
and displays the status of each builder."""

    def execute(self, options, args, tool):
        for builder in tool.buildbot.builder_statuses():
            status_string = "ok" if builder["is_green"] else "FAIL"
            print "%s : %s" % (status_string.ljust(4), builder["name"])


class CrashLog(Command):
    name = "crash-log"
    help_text = "Print the newest crash log for the given process"
    long_help = """Finds the newest crash log matching the given process name
and PID and prints it to stdout."""
    argument_names = "PROCESS_NAME [PID]"

    def execute(self, options, args, tool):
        crash_logs = CrashLogs(tool)
        pid = None
        if len(args) > 1:
            pid = int(args[1])
        print crash_logs.find_newest_log(args[0], pid)


class PrintExpectations(Command):
    name = 'print-expectations'
    help_text = 'Print the expected result for the given test(s) on the given port(s)'

    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the expectations for *all* tests'),
            make_option('-x', '--exclude-keyword', action='append', default=[],
                        help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
            make_option('-i', '--include-keyword', action='append', default=[],
                        help='limit to tests with the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, modifiers, tests, and expectations'),
            make_option('-f', '--full', action='store_true', default=False,
                        help='Print a full TestExpectations-style line for every match'),
            make_option('--paths', action='store_true', default=False,
                        help='display the paths for all applicable expectation files'),
        ] + platform_options(use_globs=True)

        Command.__init__(self, options=options)
        self._expectation_models = {}

    def execute(self, options, args, tool):
        if not options.paths and not args and not options.all:
            print "You must either specify one or more test paths or pass --all."
            return

        if options.platform:
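            # Treat --platform as a glob against the known port names first;
            # if nothing matches, fall back to asking the factory for a port
            # with that exact name.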
            port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
            if not port_names:
                default_port = tool.port_factory.get(options.platform)
                if default_port:
                    port_names = [default_port.name()]
                else:
                    print "No port names match '%s'" % options.platform
                    return
            else:
                default_port = tool.port_factory.get(port_names[0])
        else:
            default_port = tool.port_factory.get(options=options)
            port_names = [default_port.name()]

        if options.paths:
            files = default_port.expectations_files()
            layout_tests_dir = default_port.layout_tests_dir()
            for path in files:
                if path.startswith(layout_tests_dir):
                    path = path.replace(layout_tests_dir, 'LayoutTests')
                print path
            return

        tests = set(default_port.tests(args))
        for port_name in port_names:
            model = self._model(options, port_name, tests)
            tests_to_print = self._filter_tests(options, model, tests)
            lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
            if port_name != port_names[0]:
                print
            print '\n'.join(self._format_lines(options, port_name, lines))

    def _filter_tests(self, options, model, tests):
        filtered_tests = set()
        if options.include_keyword:
            for keyword in options.include_keyword:
                filtered_tests.update(model.get_test_set_for_keyword(keyword))
        else:
            filtered_tests = tests

        for keyword in options.exclude_keyword:
            filtered_tests.difference_update(model.get_test_set_for_keyword(keyword))
        return filtered_tests

    def _format_lines(self, options, port_name, lines):
        output = []
        if options.csv:
            for line in lines:
                output.append("%s,%s" % (port_name, line.to_csv()))
        elif lines:
            include_modifiers = options.full
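            # When filtering by exactly one include keyword (and no excludes),
            # every printed line has the same expectation, so printing it would
            # be redundant unless --full output was requested.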
            include_expectations = options.full or len(options.include_keyword) != 1 or len(options.exclude_keyword)
            output.append("// For %s" % port_name)
            for line in lines:
                output.append("%s" % line.to_string(None, include_modifiers, include_expectations, include_comment=False))
        return output

    def _model(self, options, port_name, tests):
        port = self._tool.port_factory.get(port_name, options)
        expectations = TestExpectations(port, tests)
        expectations.parse_all_expectations()
        return expectations.model()


class PrintBaselines(Command):
    name = 'print-baselines'
    help_text = 'Prints the baseline locations for given test(s) on the given port(s)'

    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the baselines for *all* tests'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
        ] + platform_options(use_globs=True)
        Command.__init__(self, options=options)
        self._platform_regexp = re.compile(r'platform/([^/]+)/(.+)')

    def execute(self, options, args, tool):
        if not args and not options.all:
            print "You must either specify one or more test paths or pass --all."
            return

        default_port = tool.port_factory.get()
        if options.platform:
            port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
            if not port_names:
                print "No port names match '%s'" % options.platform
                return
        else:
            port_names = [default_port.name()]

        # FIXME: make real_tests() a public method.
        tests = sorted(default_port._real_tests(args))

        for port_name in port_names:
            if port_name != port_names[0]:
                print
            if not options.csv:
                print "// For %s" % port_name
            port = tool.port_factory.get(port_name)
            for test_name in tests:
                self._print_baselines(options, port_name, test_name, port.expected_baselines_by_extension(test_name))

    def _print_baselines(self, options, port_name, test_name, baselines):
        for extension in sorted(baselines.keys()):
            baseline_location = baselines[extension]
            if baseline_location:
                if options.csv:
                    print "%s,%s,%s,%s,%s,%s" % (port_name, test_name, self._platform_for_path(test_name),
                                                 extension[1:], baseline_location, self._platform_for_path(baseline_location))
                else:
                    print baseline_location

    def _platform_for_path(self, relpath):
        platform_matchobj = self._platform_regexp.match(relpath)
        if platform_matchobj:
            return platform_matchobj.group(1)
        return None


class FindResolvedBugs(Command):
    name = "find-resolved-bugs"
    help_text = "Collect the RESOLVED bugs in the given TestExpectations file"
    argument_names = "TEST_EXPECTATIONS_FILE"

    def execute(self, options, args, tool):
        filename = args[0]
        if not tool.filesystem.isfile(filename):
            print "The given path is not a file; please pass a valid path."
            return

        ids = set()
        inputfile = tool.filesystem.open_text_file_for_reading(filename)
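        # Collect bug numbers from both URL forms that appear in
        # TestExpectations files: the full show_bug.cgi URL and the
        # webkit.org/b/ shorthand.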
        for line in inputfile:
            result = re.search(r"(https://bugs\.webkit\.org/show_bug\.cgi\?id=|webkit\.org/b/)([0-9]+)", line)
            if result:
                ids.add(result.group(2))
        inputfile.close()

        resolved_ids = set()
        num_of_bugs = len(ids)
        bugzilla = Bugzilla()
        for i, bugid in enumerate(ids, start=1):
            bug = bugzilla.fetch_bug(bugid)
            print "Checking bug %s \t [%d/%d]" % (bugid, i, num_of_bugs)
            if not bug.is_open():
                resolved_ids.add(bugid)

        print "Resolved bugs in %s:" % (filename)
        for bugid in resolved_ids:
            print "https://bugs.webkit.org/show_bug.cgi?id=%s" % (bugid)