+2011-01-11 Dirk Pranke <dpranke@chromium.org>
+
+ Reviewed by Ojan Vafai.
+
+ The current modifier parsing code in test_expectations is
+ fragile and hard-coded, which makes the logic difficult to follow
+ and makes it hard to add new types of modifiers (like GPU vs. CPU
+ testing for graphics tests, or 32-bit vs. 64-bit differences).
+
+ This is the first of two patches; together they add more generic
+ support for modifiers and then eliminate the GPU-specific test
+ expectations files for Chromium.
+
+ This patch adds two standalone objects for handling modifiers. The
+ rules for interpreting modifiers, precedence, and conflicts are
+ given in the docstring of the ModifierMatcher class, which
+ returns ModifierMatchResult objects.
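+
+ A sketch of the intended usage (names as added in this patch;
+ 'port' stands for any Port object):
+
+   matcher = ModifierMatcher(port.test_configuration())
+   result = matcher.match(['bugwk1', 'mac', 'release'])
+   # result.num_matches ranks how specific the line is;
+   # ModifierMatcher.NO_MATCH (-1) means the line does not apply.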
+
+ This patch also adds routines to the Port interface, along with a
+ default set of values in the base object, for obtaining the
+ values needed on a given test run. These values are then passed
+ to the expectation parser. This also allows us to clean up the
+ logic used to lint all of the different configurations in a
+ single test_expectations.txt file.
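+
+ For example, linting can now simply iterate over every supported
+ configuration (illustrative; expectations_str stands for the file
+ contents):
+
+   for config in port.all_test_configurations():
+       TestExpectations(port, None, expectations_str, config,
+                        is_lint_mode=True)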
+
+ The next patch will merge in the separate GPU expectations file.
+
+ https://bugs.webkit.org/show_bug.cgi?id=51222
+
+ * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
+ * Scripts/webkitpy/layout_tests/layout_package/test_expectations.py:
+ * Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py:
+ * Scripts/webkitpy/layout_tests/port/base.py:
+ * Scripts/webkitpy/layout_tests/port/base_unittest.py:
+ * Scripts/webkitpy/layout_tests/port/chromium.py:
+ * Scripts/webkitpy/layout_tests/port/port_testcase.py:
+ * Scripts/webkitpy/layout_tests/port/test.py:
+ * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+ * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
+
2011-01-11 Maciej Stachowiak <mjs@apple.com>
Reviewed by Adam Barth.
import os
import optparse
-import pdb
import sys
import unittest
import logging
test in tests]
expectations = test_expectations.TestExpectations(
self._port, test_paths, expectations_str,
- self._port.test_platform_name(), is_debug_mode=False,
+ self._port.test_configuration(),
is_lint_mode=False)
rs = result_summary.ResultSummary(expectations, test_paths)
def test_print_progress__detailed(self):
tests = ['passes/text.html', 'failures/expected/timeout.html',
'failures/expected/crash.html']
- expectations = 'failures/expected/timeout.html = TIMEOUT'
+ expectations = 'BUGX : failures/expected/timeout.html = TIMEOUT'
# first, test that it is disabled properly
# should still print one-line-progress
self.assertFalse(out.empty())
expectations = """
-failures/expected/crash.html = CRASH
-failures/expected/timeout.html = TIMEOUT
+BUGX : failures/expected/crash.html = CRASH
+BUGX : failures/expected/timeout.html = TIMEOUT
"""
err.reset()
out.reset()
for layout tests.
"""
+import itertools
import logging
import os
import re
class TestExpectations:
TEST_LIST = "test_expectations.txt"
- def __init__(self, port, tests, expectations, test_platform_name,
- is_debug_mode, is_lint_mode, overrides=None):
+ def __init__(self, port, tests, expectations, test_config,
+ is_lint_mode, overrides=None):
"""Loads and parses the test expectations given in the string.
Args:
port: handle to object containing platform-specific functionality
test: list of all of the test files
expectations: test expectations as a string
- test_platform_name: name of the platform to match expectations
- against. Note that this may be different than
- port.test_platform_name() when is_lint_mode is True.
- is_debug_mode: whether to use the DEBUG or RELEASE modifiers
- in the expectations
+ test_config: specific values to check against when
+ parsing the file (usually port.test_configuration(),
+ but may be different when linting or doing other things).
is_lint_mode: If True, just parse the expectations string
looking for errors.
overrides: test expectations that are allowed to override any
and downstream expectations).
"""
self._expected_failures = TestExpectationsFile(port, expectations,
- tests, test_platform_name, is_debug_mode, is_lint_mode,
+ tests, test_config, is_lint_mode,
overrides=overrides)
# TODO(ojan): Allow for removing skipped tests when getting the list of
return '\n'.join(map(str, self.errors))
def __repr__(self):
- return 'ParseError(fatal=%s, errors=%s)' % (fatal, errors)
+ return 'ParseError(fatal=%s, errors=%s)' % (self.fatal, self.errors)
class ModifiersAndExpectations:
'fail': FAIL,
'flaky': FLAKY}
- def __init__(self, port, expectations, full_test_list, test_platform_name,
- is_debug_mode, is_lint_mode, overrides=None):
- """
- expectations: Contents of the expectations file
- full_test_list: The list of all tests to be run pending processing of
- the expections for those tests.
- test_platform_name: name of the platform to match expectations
- against. Note that this may be different than
- port.test_platform_name() when is_lint_mode is True.
- is_debug_mode: Whether we testing a test_shell built debug mode.
- is_lint_mode: Whether this is just linting test_expecatations.txt.
- overrides: test expectations that are allowed to override any
- entries in |expectations|. This is used by callers
- that need to manage two sets of expectations (e.g., upstream
- and downstream expectations).
- """
+ def __init__(self, port, expectations, full_test_list,
+ test_config, is_lint_mode, overrides=None):
+ # See argument documentation in TestExpectations(), above.
self._port = port
self._expectations = expectations
self._full_test_list = full_test_list
- self._test_platform_name = test_platform_name
- self._is_debug_mode = is_debug_mode
+ self._test_config = test_config
self._is_lint_mode = is_lint_mode
self._overrides = overrides
self._errors = []
# Maps relative test paths as listed in the expectations file to a
# list of maps containing modifiers and expectations for each time
- # the test is listed in the expectations file.
+ # the test is listed in the expectations file. We use this to
+ # keep a representation of the entire list of expectations, even
+ # invalid ones.
self._all_expectations = {}
# Maps a test to its list of expectations.
# the options minus any bug or platform strings
self._test_to_modifiers = {}
- # Maps a test to the base path that it was listed with in the list.
+ # Maps a test to a tuple of the base path it was listed under in
+ # the expectations file, the number of modifiers that path matched,
+ # and the line number of the entry.
self._test_list_paths = {}
self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS)
def _handle_any_read_errors(self):
if len(self._errors) or len(self._non_fatal_errors):
- if self._is_debug_mode:
- build_type = 'DEBUG'
- else:
- build_type = 'RELEASE'
- _log.error('')
- _log.error("FAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" %
- (self._test_platform_name.upper(), build_type))
+ _log.error("FAILURES FOR %s" % str(self._test_config))
for error in self._errors:
_log.error(error)
expectations = set([PASS])
options = []
modifiers = []
+ num_matches = 0
if self._full_test_list:
for test in self._full_test_list:
if not test in self._test_list_paths:
- self._add_test(test, modifiers, expectations, options,
- overrides_allowed=False)
+ self._add_test(test, modifiers, num_matches, expectations,
+ options, overrides_allowed=False)
def _dict_of_sets(self, strings_to_constants):
"""Takes a dict of strings->constants and returns a dict mapping
options = []
if line.find(":") is -1:
- test_and_expectation = line.split("=")
- else:
- parts = line.split(":")
- options = self._get_options_list(parts[0])
- test_and_expectation = parts[1].split('=')
+ self._add_error(lineno, "Missing a ':'", line)
+ return (None, None, None)
+
+ parts = line.split(':')
+ # FIXME: verify that there is exactly one colon in the line.
+
+ options = self._get_options_list(parts[0])
+ test_and_expectation = parts[1].split('=')
test = test_and_expectation[0].strip()
if (len(test_and_expectation) is not 2):
self._add_error(lineno, "Missing expectations.",
return REMOVE_TEST
- def _has_valid_modifiers_for_current_platform(self, options, lineno,
- test_and_expectations, modifiers):
- """Returns true if the current platform is in the options list or if
- no platforms are listed and if there are no fatal errors in the
- options list.
-
- Args:
- options: List of lowercase options.
- lineno: The line in the file where the test is listed.
- test_and_expectations: The path and expectations for the test.
- modifiers: The set to populate with modifiers.
- """
- has_any_platform = False
- has_bug_id = False
- for option in options:
- if option in self.MODIFIERS:
- modifiers.add(option)
- elif option in self._port.test_platform_names():
- has_any_platform = True
- elif re.match(r'bug\d', option) != None:
- self._add_error(lineno, 'Bug must be either BUGCR, BUGWK, or BUGV8_ for test: %s' %
- option, test_and_expectations)
- elif option.startswith('bug'):
- has_bug_id = True
- elif option not in self.BUILD_TYPES:
- self._add_error(lineno, 'Invalid modifier for test: %s' %
- option, test_and_expectations)
-
- if has_any_platform and not self._match_platform(options):
- return False
-
- if not has_bug_id and 'wontfix' not in options:
- # TODO(ojan): Turn this into an AddError call once all the
- # tests have BUG identifiers.
- self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
- test_and_expectations)
-
- if 'release' in options or 'debug' in options:
- if self._is_debug_mode and 'debug' not in options:
- return False
- if not self._is_debug_mode and 'release' not in options:
- return False
-
- if self._is_lint_mode and 'rebaseline' in options:
- self._add_error(lineno,
- 'REBASELINE should only be used for running rebaseline.py. '
- 'Cannot be checked in.', test_and_expectations)
-
- return True
-
- def _match_platform(self, options):
- """Match the list of options against our specified platform. If any
- of the options prefix-match self._platform, return True. This handles
- the case where a test is marked WIN and the platform is WIN-VISTA.
-
- Args:
- options: list of options
- """
- for opt in options:
- if self._test_platform_name.startswith(opt):
- return True
- return False
-
def _add_to_all_expectations(self, test, options, expectations):
# Make all paths unix-style so the dashboard doesn't need to.
test = test.replace('\\', '/')
"""For each test in an expectations iterable, generate the
expectations for it."""
lineno = 0
+ matcher = ModifierMatcher(self._test_config)
for line in expectations:
lineno += 1
+ self._process_line(line, lineno, matcher, overrides_allowed)
- test_list_path, options, expectations = \
- self.parse_expectations_line(line, lineno)
- if not expectations:
- continue
+ def _process_line(self, line, lineno, matcher, overrides_allowed):
+ test_list_path, options, expectations = \
+ self.parse_expectations_line(line, lineno)
+ if not expectations:
+ return
- self._add_to_all_expectations(test_list_path,
- " ".join(options).upper(),
- " ".join(expectations).upper())
+ self._add_to_all_expectations(test_list_path,
+ " ".join(options).upper(),
+ " ".join(expectations).upper())
- modifiers = set()
- if options and not self._has_valid_modifiers_for_current_platform(
- options, lineno, test_list_path, modifiers):
- continue
+ num_matches = self._check_options(matcher, options, lineno,
+ test_list_path)
+ if num_matches == ModifierMatcher.NO_MATCH:
+ return
- expectations = self._parse_expectations(expectations, lineno,
- test_list_path)
+ expectations = self._parse_expectations(expectations, lineno,
+ test_list_path)
- if 'slow' in options and TIMEOUT in expectations:
- self._add_error(lineno,
- 'A test can not be both slow and timeout. If it times out '
- 'indefinitely, then it should be just timeout.',
- test_list_path)
+ self._check_options_against_expectations(options, expectations,
+ lineno, test_list_path)
- full_path = os.path.join(self._port.layout_tests_dir(),
- test_list_path)
- full_path = os.path.normpath(full_path)
- # WebKit's way of skipping tests is to add a -disabled suffix.
- # So we should consider the path existing if the path or the
- # -disabled version exists.
- if (not self._port.path_exists(full_path)
- and not self._port.path_exists(full_path + '-disabled')):
- # Log a non fatal error here since you hit this case any
- # time you update test_expectations.txt without syncing
- # the LayoutTests directory
- self._log_non_fatal_error(lineno, 'Path does not exist.',
- test_list_path)
- continue
+ if self._check_path_does_not_exist(lineno, test_list_path):
+ return
- if not self._full_test_list:
- tests = [test_list_path]
- else:
- tests = self._expand_tests(test_list_path)
+ if not self._full_test_list:
+ tests = [test_list_path]
+ else:
+ tests = self._expand_tests(test_list_path)
- self._add_tests(tests, expectations, test_list_path, lineno,
- modifiers, options, overrides_allowed)
+ modifiers = [o for o in options if o in self.MODIFIERS]
+ self._add_tests(tests, expectations, test_list_path, lineno,
+ modifiers, num_matches, options, overrides_allowed)
def _get_options_list(self, listString):
return [part.strip().lower() for part in listString.strip().split(' ')]
result.add(expectation)
return result
+ def _check_options(self, matcher, options, lineno, test_list_path):
+ match_result = self._check_syntax(matcher, options, lineno,
+ test_list_path)
+ self._check_semantics(options, lineno, test_list_path)
+ return match_result.num_matches
+
+ def _check_syntax(self, matcher, options, lineno, test_list_path):
+ match_result = matcher.match(options)
+ for error in match_result.errors:
+ self._add_error(lineno, error, test_list_path)
+ for warning in match_result.warnings:
+ self._log_non_fatal_error(lineno, warning, test_list_path)
+ return match_result
+
+ def _check_semantics(self, options, lineno, test_list_path):
+ has_wontfix = 'wontfix' in options
+ has_bug = False
+ for opt in options:
+ if opt.startswith('bug'):
+ has_bug = True
+ if re.match(r'bug\d+', opt):
+ self._add_error(lineno,
+ 'BUG\d+ is not allowed, must be one of '
+ 'BUGCR\d+, BUGWK\d+, BUGV8_\d+, '
+ 'or a non-numeric bug identifier.', test_list_path)
+
+ if not has_bug and not has_wontfix:
+ self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
+ test_list_path)
+
+ if self._is_lint_mode and 'rebaseline' in options:
+ self._add_error(lineno,
+ 'REBASELINE should only be used for running rebaseline.py. '
+ 'Cannot be checked in.', test_list_path)
+
+ def _check_options_against_expectations(self, options, expectations,
+ lineno, test_list_path):
+ if 'slow' in options and TIMEOUT in expectations:
+ self._add_error(lineno,
+ 'A test can not be both slow and timeout. If it times out '
+ 'indefinitely, then it should be just timeout.',
+ test_list_path)
+
+ def _check_path_does_not_exist(self, lineno, test_list_path):
+ full_path = os.path.join(self._port.layout_tests_dir(),
+ test_list_path)
+ full_path = os.path.normpath(full_path)
+ # WebKit's way of skipping tests is to add a -disabled suffix.
+ # So we should consider the path existing if the path or the
+ # -disabled version exists.
+ if (not self._port.path_exists(full_path)
+ and not self._port.path_exists(full_path + '-disabled')):
+ # Log a non fatal error here since you hit this case any
+ # time you update test_expectations.txt without syncing
+ # the LayoutTests directory
+ self._log_non_fatal_error(lineno, 'Path does not exist.',
+ test_list_path)
+ return True
+ return False
+
def _expand_tests(self, test_list_path):
"""Convert the test specification to an absolute, normalized
path and make sure directories end with the OS path separator."""
return result
def _add_tests(self, tests, expectations, test_list_path, lineno,
- modifiers, options, overrides_allowed):
+ modifiers, num_matches, options, overrides_allowed):
for test in tests:
- if self._already_seen_test(test, test_list_path, lineno,
- overrides_allowed):
+ if self._already_seen_better_match(test, test_list_path,
+ num_matches, lineno, overrides_allowed):
continue
self._clear_expectations_for_test(test, test_list_path)
- self._add_test(test, modifiers, expectations, options,
+ self._test_list_paths[test] = (os.path.normpath(test_list_path),
+ num_matches, lineno)
+ self._add_test(test, modifiers, num_matches, expectations, options,
overrides_allowed)
- def _add_test(self, test, modifiers, expectations, options,
+ def _add_test(self, test, modifiers, num_matches, expectations, options,
overrides_allowed):
"""Sets the expected state for a given test.
Args:
test: test to add
modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
+ num_matches: number of modifiers that matched the configuration
expectations: sequence of expectations (PASS, IMAGE, etc.)
options: sequence of keywords and bug identifiers.
overrides_allowed: whether we're parsing the regular expectations
than a previous listing.
"""
if test in self._test_list_paths:
- self._test_to_expectations.pop(test, '')
- self._remove_from_sets(test, self._expectation_to_tests)
- self._remove_from_sets(test, self._modifier_to_tests)
- self._remove_from_sets(test, self._timeline_to_tests)
- self._remove_from_sets(test, self._result_type_to_tests)
+ base_path = self._test_list_paths[test][0]
+ self._test_to_expectations.pop(base_path, '')
+ self._remove_from_sets(base_path, self._expectation_to_tests)
+ self._remove_from_sets(base_path, self._modifier_to_tests)
+ self._remove_from_sets(base_path, self._timeline_to_tests)
+ self._remove_from_sets(base_path, self._result_type_to_tests)
- self._test_list_paths[test] = os.path.normpath(test_list_path)
def _remove_from_sets(self, test, dict):
"""Removes the given test from the sets in the dictionary.
if test in set_of_tests:
set_of_tests.remove(test)
- def _already_seen_test(self, test, test_list_path, lineno,
- allow_overrides):
- """Returns true if we've already seen a more precise path for this test
- than the test_list_path.
+ def _already_seen_better_match(self, test, test_list_path, num_matches,
+ lineno, overrides_allowed):
+ """Returns whether we've seen a better match already in the file.
+
+ Returns True if we've already seen a test_list_path that matches more of the test
+ than this path does.
"""
+ # FIXME: See comment below about matching test configs and num_matches.
+
if not test in self._test_list_paths:
+ # We've never seen this test before.
return False
- prev_base_path = self._test_list_paths[test]
- if (prev_base_path == os.path.normpath(test_list_path)):
- if (not allow_overrides or test in self._overridding_tests):
- if allow_overrides:
- expectation_source = "override"
- else:
- expectation_source = "expectation"
- self._add_error(lineno, 'Duplicate %s.' % expectation_source,
- test)
- return True
- else:
- # We have seen this path, but that's okay because its
- # in the overrides and the earlier path was in the
- # expectations.
- return False
+ prev_base_path, prev_num_matches, prev_lineno = self._test_list_paths[test]
+ base_path = os.path.normpath(test_list_path)
+
+ if len(prev_base_path) > len(base_path):
+ # The previous path matched more of the test.
+ return True
+
+ if len(prev_base_path) < len(base_path):
+ # This path matches more of the test.
+ return False
+
+ if overrides_allowed and test not in self._overridding_tests:
+ # We have seen this path, but that's okay because it is
+ # in the overrides and the earlier path was in the
+ # expectations (not the overrides).
+ return False
+
+ # At this point we know we have seen a previous exact match on this
+ # base path, so we need to check the two sets of modifiers.
+
+ if overrides_allowed:
+ expectation_source = "override"
+ else:
+ expectation_source = "expectation"
+
+ # FIXME: This code was originally designed to allow lines that matched
+ # more modifiers to override lines that matched fewer modifiers.
+ # However, we currently view these as errors. If we decide to make
+ # this policy permanent, we can probably simplify this code
+ # and the ModifierMatcher code a fair amount.
+ #
+ # To restore the "more modifiers wins" policy, change the _add_error()
+ # calls below to _log_non_fatal_error() and re-enable the commented-out
+ # "return False".
+
+ if prev_num_matches == num_matches:
+ self._add_error(lineno,
+ 'Duplicate or ambiguous %s.' % expectation_source,
+ test)
+ return True
- # Check if we've already seen a more precise path.
- return prev_base_path.startswith(os.path.normpath(test_list_path))
+ if prev_num_matches < num_matches:
+ self._add_error(lineno,
+ 'More specific entry on line %d overrides line %d' %
+ (lineno, prev_lineno), test_list_path)
+ # FIXME: return False if we want more specific to win.
+ return True
+
+ self._add_error(lineno,
+ 'More specific entry on line %d overrides line %d' %
+ (prev_lineno, lineno), test_list_path)
+ return True
def _add_error(self, lineno, msg, path):
"""Reports an error that will prevent running the tests. Does not
"""Reports an error that will not prevent running the tests. These are
still errors, but not bad enough to warrant breaking test running."""
self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path))
+
+
+class ModifierMatchResult(object):
+ def __init__(self, options):
+ self.num_matches = ModifierMatcher.NO_MATCH
+ self.options = options
+ self.errors = []
+ self.warnings = []
+ self.modifiers = []
+ self._matched_regexes = set()
+ self._matched_macros = set()
+
+
+class ModifierMatcher(object):
+
+ """
+ This class manages the interpretation of the "modifiers" for a given
+ line in the expectations file. Modifiers are the tokens that appear to the
+ left of the colon on a line. For example, "BUG1234", "DEBUG", and "WIN" are
+ all modifiers. The set of valid modifiers, and which modifiers are
+ allowed to appear together on a line, is derived from the
+ TestConfiguration object passed to the constructor.
+
+ This class detects *intra*-line errors like unknown modifiers, but
+ does not detect *inter*-line errors like duplicate expectations.
+
+ More importantly, this class is also used to determine if a given line
+ matches the port in question. Matches are ranked according to the number
+ of modifiers that match on a line. A line with no modifiers matches
+ everything and has a score of zero. A line with one modifier matches only
+ ports that have that modifier and gets a score of 1, and so one. Ports
+ that don't match at all get a score of -1.
+
+ Given two lines in a file that apply to the same test, if both expectations
+ match the current config, then the expectation is considered ambiguous,
+ even if one expectation matches more of the config than the other. For
+ example, in:
+
+ BUG1 RELEASE : foo.html = FAIL
+ BUG1 WIN RELEASE : foo.html = PASS
+ BUG2 WIN : bar.html = FAIL
+ BUG2 DEBUG : bar.html = PASS
+
+ lines 1 and 2 would produce an error on a Win XP Release bot (the scores
+ would be 1 and 2, respectively), and lines 3 and 4 would produce
+ a duplicate expectation on a Win Debug bot, since both the 'win' and the
+ 'debug' expectations would apply (both with scores of 1).
+
+ In addition to the definitions of all of the modifiers, the class
+ supports "macros" that are expanded prior to interpretation, and "ignore
+ regexes" that can be used to skip over modifiers like the BUG* modifiers.
+ """
+ MACROS = {
+ 'mac-snowleopard': ['mac', 'snowleopard'],
+ 'mac-leopard': ['mac', 'leopard'],
+ 'win-xp': ['win', 'xp'],
+ 'win-vista': ['win', 'vista'],
+ 'win-7': ['win', 'win7'],
+ }
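+
+ # For example, the 'mac-leopard' macro above makes a line tagged
+ # MAC-LEOPARD behave as if it were tagged with both MAC and LEOPARD.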
+
+ # We don't include the "none" modifier because it isn't actually legal.
+ # Note that we filter it out explicitly rather than relying on the
+ # (arbitrary) ordering of the MODIFIERS dict's keys.
+ REGEXES_TO_IGNORE = ([r'bug\w+'] +
+ [key for key in TestExpectationsFile.MODIFIERS.keys()
+ if key != 'none'])
+ DUPLICATE_REGEXES_ALLOWED = [r'bug\w+']
+
+ # Magic value returned when the options don't match.
+ NO_MATCH = -1
+
+ # FIXME: The code currently doesn't detect combinations of modifiers
+ # that are syntactically valid but semantically invalid, like
+ # 'MAC XP'. See ModifierMatchTest.test_invalid_combinations() in the
+ # _unittest.py file.
+
+ def __init__(self, test_config):
+ """Initialize a ModifierMatcher argument with the TestConfiguration it
+ should be matched against."""
+ self.test_config = test_config
+ self.allowed_configurations = test_config.all_test_configurations()
+ self.macros = self.MACROS
+
+ self.regexes_to_ignore = {}
+ for regex_str in self.REGEXES_TO_IGNORE:
+ self.regexes_to_ignore[regex_str] = re.compile(regex_str)
+
+ # Keep a set of all of the legal modifiers for quick checking.
+ self._all_modifiers = set()
+
+ # Keep a dict mapping values back to their categories.
+ self._categories_for_modifiers = {}
+ for config in self.allowed_configurations:
+ for category, modifier in config.items():
+ self._categories_for_modifiers[modifier] = category
+ self._all_modifiers.add(modifier)
+
+ def match(self, options):
+ """Checks a list of options against the config set in the constructor.
+ Options may be either actual modifier strings, "macro" strings
+ that get expanded to a list of modifiers, or strings that are allowed
+ to be ignored. All of the options must be passed in lower case.
+
+ Returns a ModifierMatchResult object. Its num_matches field holds
+ the number of matching categories, or NO_MATCH (-1) if the options
+ don't match the config or errors were found. Matches are prioritized
+ by the number of matching categories, because the more specific
+ the options list, the more categories will match.
+
+ The details of the match are available in the result's 'options',
+ 'modifiers', 'num_matches', 'errors', and 'warnings' fields.
+ """
+ result = ModifierMatchResult(options)
+ self._parse(result)
+ if result.errors:
+ return result
+ self._count_matches(result)
+ return result
+
+ def _parse(self, result):
+ # FIXME: Should we warn about lines having every value in a category?
+ for option in result.options:
+ self._parse_one(option, result)
+
+ def _parse_one(self, option, result):
+ if option in self._all_modifiers:
+ self._add_modifier(option, result)
+ elif option in self.macros:
+ self._expand_macro(option, result)
+ elif not self._matches_any_regex(option, result):
+ result.errors.append("Unrecognized option '%s'" % option)
+
+ def _add_modifier(self, option, result):
+ if option in result.modifiers:
+ result.errors.append("More than one '%s'" % option)
+ else:
+ result.modifiers.append(option)
+
+ def _expand_macro(self, macro, result):
+ if macro in result._matched_macros:
+ result.errors.append("More than one '%s'" % macro)
+ return
+
+ mods = []
+ for modifier in self.macros[macro]:
+ if modifier in result.options:
+ result.errors.append("Can't specify both modifier '%s' and "
+ "macro '%s'" % (modifier, macro))
+ else:
+ mods.append(modifier)
+ result._matched_macros.add(macro)
+ result.modifiers.extend(mods)
+
+ def _matches_any_regex(self, option, result):
+ for regex_str, pattern in self.regexes_to_ignore.iteritems():
+ if pattern.match(option):
+ self._handle_regex_match(regex_str, result)
+ return True
+ return False
+
+ def _handle_regex_match(self, regex_str, result):
+ if (regex_str in result._matched_regexes and
+ regex_str not in self.DUPLICATE_REGEXES_ALLOWED):
+ result.errors.append("More than one option matching '%s'" %
+ regex_str)
+ else:
+ result._matched_regexes.add(regex_str)
+
+ def _count_matches(self, result):
+ """Returns the number of modifiers that match the test config."""
+ categorized_modifiers = self._group_by_category(result.modifiers)
+ result.num_matches = 0
+ for category, modifier in self.test_config.items():
+ if category in categorized_modifiers:
+ if modifier in categorized_modifiers[category]:
+ result.num_matches += 1
+ else:
+ result.num_matches = self.NO_MATCH
+ return
+
+ def _group_by_category(self, modifiers):
+ # Returns a dict of category name -> list of modifiers.
+ modifiers_by_category = {}
+ for m in modifiers:
+ modifiers_by_category.setdefault(self._category(m), []).append(m)
+ return modifiers_by_category
+
+ def _category(self, modifier):
+ return self._categories_for_modifiers[modifier]
import unittest
from webkitpy.layout_tests import port
+from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.layout_package.test_expectations import *
class FunctionsTest(unittest.TestCase):
class Base(unittest.TestCase):
+ # Note that all of these tests are written assuming the configuration
+ # being tested is Mac Leopard, Release build.
+
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
self._port = port.get('test', None)
self._exp = None
BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE
"""
- def parse_exp(self, expectations, overrides=None, is_lint_mode=False,
- is_debug_mode=False):
+ def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
+ test_config = self._port.test_configuration()
self._exp = TestExpectations(self._port,
tests=self.get_basic_tests(),
expectations=expectations,
- test_platform_name=self._port.test_platform_name(),
- is_debug_mode=is_debug_mode,
+ test_config=test_config,
is_lint_mode=is_lint_mode,
overrides=overrides)
set([result]))
-class TestExpectationsTest(Base):
+class BasicTests(Base):
def test_basic(self):
self.parse_exp(self.get_basic_expectations())
self.assert_exp('failures/expected/text.html', TEXT)
self.assert_exp('passes/text.html', PASS)
self.assert_exp('failures/expected/image.html', PASS)
+
+class MiscTests(Base):
def test_multiple_results(self):
self.parse_exp('BUGX : failures/expected/text.html = TEXT CRASH')
self.assertEqual(self._exp.get_expectations(
self.get_test('failures/expected/text.html')),
set([TEXT, CRASH]))
- def test_precedence(self):
- # This tests handling precedence of specific lines over directories
- # and tests expectations covering entire directories.
- exp_str = """
-BUGX : failures/expected/text.html = TEXT
-BUGX WONTFIX : failures/expected = IMAGE
-"""
- self.parse_exp(exp_str)
- self.assert_exp('failures/expected/text.html', TEXT)
- self.assert_exp('failures/expected/crash.html', IMAGE)
-
def test_category_expectations(self):
# This test checks unknown tests are not present in the
# expectations and that known test part of a test category is
unknown_test)
self.assert_exp('failures/expected/crash.html', IMAGE)
- def test_release_mode(self):
- self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
- is_debug_mode=True)
- self.assert_exp('failures/expected/text.html', TEXT)
- self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
- is_debug_mode=True)
- self.assert_exp('failures/expected/text.html', PASS)
- self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
- is_debug_mode=False)
- self.assert_exp('failures/expected/text.html', PASS)
- self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
- is_debug_mode=False)
- self.assert_exp('failures/expected/text.html', TEXT)
-
def test_get_options(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_options(
self.assertFalse(True, "ParseError wasn't raised")
except ParseError, e:
self.assertTrue(e.fatal)
- exp_errors = [u'Line:1 Invalid modifier for test: foo failures/expected/text.html',
+ exp_errors = [u"Line:1 Unrecognized option 'foo' failures/expected/text.html",
u"Line:2 Missing expectations. [' failures/expected/image.html']"]
self.assertEqual(str(e), '\n'.join(map(str, exp_errors)))
self.assertEqual(e.errors, exp_errors)
self.assertEqual(str(e), '\n'.join(map(str, exp_errors)))
self.assertEqual(e.errors, exp_errors)
- def test_syntax_missing_expectation(self):
+ def test_overrides(self):
+ self.parse_exp("BUG_EXP: failures/expected/text.html = TEXT",
+ "BUG_OVERRIDE : failures/expected/text.html = IMAGE")
+ self.assert_exp('failures/expected/text.html', IMAGE)
+
+ def test_overrides__duplicate(self):
+ self.assertRaises(ParseError, self.parse_exp,
+ "BUG_EXP: failures/expected/text.html = TEXT",
+ """
+BUG_OVERRIDE : failures/expected/text.html = IMAGE
+BUG_OVERRIDE : failures/expected/text.html = CRASH
+""")
+
+ def test_pixel_tests_flag(self):
+ def match(test, result, pixel_tests_enabled):
+ return self._exp.matches_an_expected_result(
+ self.get_test(test), result, pixel_tests_enabled)
+
+ self.parse_exp(self.get_basic_expectations())
+ self.assertTrue(match('failures/expected/text.html', TEXT, True))
+ self.assertTrue(match('failures/expected/text.html', TEXT, False))
+ self.assertFalse(match('failures/expected/text.html', CRASH, True))
+ self.assertFalse(match('failures/expected/text.html', CRASH, False))
+ self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
+ True))
+ self.assertTrue(match('failures/expected/image_checksum.html', PASS,
+ False))
+ self.assertTrue(match('failures/expected/crash.html', SKIP, False))
+ self.assertTrue(match('passes/text.html', PASS, False))
+
+
+class ExpectationSyntaxTests(Base):
+ def test_missing_expectation(self):
# This is missing the expectation.
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST: failures/expected/text.html',
- is_debug_mode=True)
+ 'BUG_TEST: failures/expected/text.html')
- def test_syntax_invalid_option(self):
+ def test_missing_colon(self):
+ # This is missing the modifiers and the ':'
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST FOO: failures/expected/text.html = PASS')
+ 'failures/expected/text.html = TEXT')
- def test_syntax_invalid_expectation(self):
- # This is missing the expectation.
+ def disabled_test_too_many_colons(self):
+ # FIXME: Enable this test and fix the underlying bug.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST: failures/expected/text.html = PASS :')
+
+ def test_too_many_equals_signs(self):
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST: failures/expected/text.html = FOO')
+ 'BUG_TEST: failures/expected/text.html = TEXT = IMAGE')
+
+ def test_unrecognized_expectation(self):
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST: failures/expected/text.html = UNKNOWN')
+
+ def test_macro(self):
+ exp_str = """
+BUG_TEST MAC-LEOPARD : failures/expected/text.html = TEXT
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', TEXT)
+
+
+class SemanticTests(Base):
+ def test_bug_format(self):
+ self.assertRaises(ParseError, self.parse_exp, 'BUG1234 : failures/expected/text.html = TEXT')
- def test_syntax_missing_bugid(self):
+ def test_missing_bugid(self):
# This should log a non-fatal error.
self.parse_exp('SLOW : failures/expected/text.html = TEXT')
self.assertEqual(
len(self._exp._expected_failures.get_non_fatal_errors()), 1)
- def test_semantic_slow_and_timeout(self):
+ def test_slow_and_timeout(self):
# A test cannot be SLOW and expected to TIMEOUT.
self.assertRaises(ParseError, self.parse_exp,
'BUG_TEST SLOW : failures/expected/timeout.html = TIMEOUT')
- def test_semantic_rebaseline(self):
+ def test_rebaseline(self):
# Can't lint a file w/ 'REBASELINE' in it.
self.assertRaises(ParseError, self.parse_exp,
'BUG_TEST REBASELINE : failures/expected/text.html = TEXT',
is_lint_mode=True)
- def test_semantic_duplicates(self):
+ def test_duplicates(self):
self.assertRaises(ParseError, self.parse_exp, """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST : failures/expected/text.html = IMAGE""")
+BUG_EXP : failures/expected/text.html = TEXT
+BUG_EXP : failures/expected/text.html = IMAGE""")
self.assertRaises(ParseError, self.parse_exp,
- self.get_basic_expectations(), """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST : failures/expected/text.html = IMAGE""")
+ self.get_basic_expectations(), overrides="""
+BUG_OVERRIDE : failures/expected/text.html = TEXT
+BUG_OVERRIDE : failures/expected/text.html = IMAGE""", )
- def test_semantic_missing_file(self):
+ def test_missing_file(self):
# This should log a non-fatal error.
self.parse_exp('BUG_TEST : missing_file.html = TEXT')
self.assertEqual(
len(self._exp._expected_failures.get_non_fatal_errors()), 1)
- def test_overrides(self):
- self.parse_exp(self.get_basic_expectations(), """
-BUG_OVERRIDE : failures/expected/text.html = IMAGE""")
- self.assert_exp('failures/expected/text.html', IMAGE)
+class PrecedenceTests(Base):
+ def test_file_over_directory(self):
+ # This tests handling precedence of specific lines over directories
+ # and tests expectations covering entire directories.
+ exp_str = """
+BUGX : failures/expected/text.html = TEXT
+BUGX WONTFIX : failures/expected = IMAGE
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', TEXT)
+ self.assert_exp('failures/expected/crash.html', IMAGE)
- def test_matches_an_expected_result(self):
+ exp_str = """
+BUGX WONTFIX : failures/expected = IMAGE
+BUGX : failures/expected/text.html = TEXT
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', TEXT)
+ self.assert_exp('failures/expected/crash.html', IMAGE)
- def match(test, result, pixel_tests_enabled):
- return self._exp.matches_an_expected_result(
- self.get_test(test), result, pixel_tests_enabled)
+ def test_ambiguous(self):
+ self.assertRaises(ParseError, self.parse_exp, """
+BUG_TEST RELEASE : passes/text.html = PASS
+BUG_TEST MAC : passes/text.html = FAIL
+""")
- self.parse_exp(self.get_basic_expectations())
- self.assertTrue(match('failures/expected/text.html', TEXT, True))
- self.assertTrue(match('failures/expected/text.html', TEXT, False))
- self.assertFalse(match('failures/expected/text.html', CRASH, True))
- self.assertFalse(match('failures/expected/text.html', CRASH, False))
- self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
- True))
- self.assertTrue(match('failures/expected/image_checksum.html', PASS,
- False))
- self.assertTrue(match('failures/expected/crash.html', SKIP, False))
- self.assertTrue(match('passes/text.html', PASS, False))
+ def test_more_modifiers(self):
+ exp_str = """
+BUG_TEST RELEASE : passes/text.html = PASS
+BUG_TEST MAC RELEASE : passes/text.html = TEXT
+"""
+ self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+ def test_order_in_file(self):
+ exp_str = """
+BUG_TEST MAC RELEASE : passes/text.html = TEXT
+BUG_TEST RELEASE : passes/text.html = PASS
+"""
+ self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+ def test_version_overrides(self):
+ exp_str = """
+BUG_TEST MAC : passes/text.html = PASS
+BUG_TEST MAC LEOPARD : passes/text.html = TEXT
+"""
+ self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+ def test_macro_overrides(self):
+ exp_str = """
+BUG_TEST MAC : passes/text.html = PASS
+BUG_TEST MAC-LEOPARD : passes/text.html = TEXT
+"""
+ self.assertRaises(ParseError, self.parse_exp, exp_str)
class RebaseliningTest(Base):
'\n\n')
+class ModifierTests(unittest.TestCase):
+ def setUp(self):
+ port_obj = port.get('test', None)
+ self.config = port_obj.test_configuration()
+ self.matcher = ModifierMatcher(self.config)
+
+ def match(self, modifiers, expected_num_matches=-1, num_errors=0):
+ # All of these tests use the matcher built in setUp() from the
+ # test port's configuration.
+ match_result = self.matcher.match(modifiers)
+ self.assertEqual(len(match_result.warnings), 0)
+ self.assertEqual(len(match_result.errors), num_errors)
+ self.assertEqual(match_result.num_matches, expected_num_matches,
+ 'match(%s, %s) returned -> %d, expected %d' %
+ (modifiers, str(self.config.values()),
+ match_result.num_matches, expected_num_matches))
+
+ def test_bad_match_modifier(self):
+ self.match(['foo'], num_errors=1)
+
+ def test_none(self):
+ self.match([], 0)
+
+ def test_one(self):
+ self.match(['leopard'], 1)
+ self.match(['mac'], 1)
+ self.match(['release'], 1)
+ self.match(['cpu'], 1)
+ self.match(['x86'], 1)
+ self.match(['xp'], -1)
+ self.match(['gpu'], -1)
+ self.match(['debug'], -1)
+
+ def test_two(self):
+ self.match(['leopard', 'release'], 2)
+ self.match(['xp', 'release'], -1)
+ self.match(['snowleopard', 'leopard'], 1)
+
+ def test_three(self):
+ self.match(['snowleopard', 'leopard', 'release'], 2)
+ self.match(['leopard', 'debug', 'x86'], -1)
+ self.match(['leopard', 'release', 'x86'], 3)
+ self.match(['leopard', 'cpu', 'release'], 3)
+
+ def test_four(self):
+ self.match(['leopard', 'release', 'cpu', 'x86'], 4)
+ self.match(['snowleopard', 'leopard', 'release', 'cpu'], 3)
+ self.match(['snowleopard', 'leopard', 'debug', 'cpu'], -1)
+
+ def test_case_insensitivity(self):
+ self.match(['Mac'], num_errors=1)
+ self.match(['MAC'], num_errors=1)
+ self.match(['mac'], 1)
+
+ def test_duplicates(self):
+ self.match(['release', 'release'], num_errors=1)
+ self.match(['mac-leopard', 'leopard'], num_errors=1)
+ self.match(['mac-leopard', 'mac-leopard'], num_errors=1)
+ self.match(['xp', 'release', 'xp', 'release'], num_errors=2)
+ self.match(['rebaseline', 'rebaseline'], num_errors=1)
+
+ def test_unknown_option(self):
+ self.match(['vms'], num_errors=1)
+
+ def test_duplicate_bugs(self):
+ # BUG* regexes can appear multiple times.
+ self.match(['bugfoo', 'bugbar'], 0)
+
+ def test_invalid_combinations(self):
+ # FIXME: This should probably raise an error instead of NO_MATCH.
+ self.match(['mac', 'xp'], num_errors=0)
+
+ def test_regexes_are_ignored(self):
+ self.match(['bug123xy', 'rebaseline', 'wontfix', 'slow', 'skip'], 0)
+
+ def test_none_is_invalid(self):
+ self.match(['none'], num_errors=1)
+
+
if __name__ == '__main__':
unittest.main()
def lint(self):
lint_failed = False
-
- # Creating the expecations for each platform/configuration pair does
- # all the test list parsing and ensures it's correct syntax (e.g. no
- # dupes).
- for platform_name in self._port.test_platform_names():
- try:
- self.parse_expectations(platform_name, is_debug_mode=True)
- except test_expectations.ParseError:
- lint_failed = True
+ for test_configuration in self._port.all_test_configurations():
try:
- self.parse_expectations(platform_name, is_debug_mode=False)
+ self.lint_expectations(test_configuration)
except test_expectations.ParseError:
lint_failed = True
+ self._printer.write("")
- self._printer.write("")
if lint_failed:
_log.error("Lint failed.")
return -1
_log.info("Lint succeeded.")
return 0
- def parse_expectations(self, test_platform_name, is_debug_mode):
+ def lint_expectations(self, config):
+ port = self._port
+ test_expectations.TestExpectations(
+ port,
+ None,
+ port.test_expectations(),
+ config,
+ self._options.lint_test_files,
+ port.test_expectations_overrides())
+
+ def parse_expectations(self):
"""Parse the expectations from the test_list files and return a data
structure holding them. Throws an error if the test_list files have
invalid syntax."""
- if self._options.lint_test_files:
- test_files = None
- else:
- test_files = self._test_files
-
- expectations_str = self._port.test_expectations()
- overrides_str = self._port.test_expectations_overrides()
+ port = self._port
self._expectations = test_expectations.TestExpectations(
- self._port, test_files, expectations_str, test_platform_name,
- is_debug_mode, self._options.lint_test_files,
- overrides=overrides_str)
- return self._expectations
+ port,
+ self._test_files,
+ port.test_expectations(),
+ port.test_configuration(),
+ self._options.lint_test_files,
+ port.test_expectations_overrides())
# FIXME: This method is way too long and needs to be broken into pieces.
def prepare_lists_and_print_output(self):
self._test_files_list = files + skip_chunk_list
self._test_files = set(self._test_files_list)
- self._expectations = self.parse_expectations(
- self._port.test_platform_name(),
- self._options.configuration == 'Debug')
+ self.parse_expectations()
self._test_files = set(files)
self._test_files_list = files
self.set_option_default('configuration', None)
if self._options.configuration is None:
self._options.configuration = self.default_configuration()
+ self._test_configuration = None
def default_child_processes(self):
"""Return the number of DumpRenderTree instances to use for this
if self._http_lock:
self._http_lock.cleanup_http_lock()
+ #
+ # TEST EXPECTATION-RELATED METHODS
+ #
+
+ def test_configuration(self):
+ """Returns the current TestConfiguration for the port."""
+ if not self._test_configuration:
+ self._test_configuration = TestConfiguration(self)
+ return self._test_configuration
+
+ def all_test_configurations(self):
+ return self.test_configuration().all_test_configurations()
+
def test_expectations(self):
"""Returns the test expectations for this port.
def stop(self):
raise NotImplementedError('Driver.stop')
+
+
+class TestConfiguration(object):
+ def __init__(self, port=None, os=None, version=None, architecture=None,
+ build_type=None, graphics_type=None):
+
+ # FIXME: We can get the O/S and version from test_platform_name()
+ # and version() for now, but those should go away and be cleaned up
+ # with more generic methods like operating_system() and os_version()
+ # or something. Note that we need to strip the leading '-' off the
+ # version string if it is present.
+ if port:
+ port_version = port.version()
+ self.os = os or port.test_platform_name().replace(port_version, '')
+ self.version = version or port_version[1:]
+ self.architecture = architecture or 'x86'
+ self.build_type = build_type or port._options.configuration.lower()
+ self.graphics_type = graphics_type or 'cpu'
+
+ def items(self):
+ return self.__dict__.items()
+
+ def keys(self):
+ return self.__dict__.keys()
+
+ def __str__(self):
+ return ("<%(version)s, %(build_type)s, %(graphics_type)s>" %
+ self.__dict__)
+
+ def __repr__(self):
+ return "TestConfig(os='%(os)s', version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s', graphics_type='%(graphics_type)s')" % self.__dict__
+
+ def values(self):
+ """Returns the configuration values of this instance as a tuple."""
+ return self.__dict__.values()
+
+ def all_test_configurations(self):
+ """Returns a sequence of the TestConfigurations the port supports."""
+ # By default, we assume we want to test every graphics type in
+ # every configuration on every system.
+ test_configurations = []
+ for system in self.all_systems():
+ for build_type in self.all_build_types():
+ for graphics_type in self.all_graphics_types():
+ test_configurations.append(TestConfiguration(
+ os=system[0],
+ version=system[1],
+ architecture=system[2],
+ build_type=build_type,
+ graphics_type=graphics_type))
+ return test_configurations
+
+ def all_systems(self):
+ return (('mac', 'leopard', 'x86'),
+ ('mac', 'snowleopard', 'x86'),
+ ('win', 'xp', 'x86'),
+ ('win', 'vista', 'x86'),
+ ('win', 'win7', 'x86'),
+ ('linux', 'hardy', 'x86'))
+
+ def all_build_types(self):
+ return ('debug', 'release')
+
+ def all_graphics_types(self):
+ return ('cpu', 'gpu')
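+
+# Illustrative: all_test_configurations() above yields 6 systems x 2 build
+# types x 2 graphics types = 24 configurations, e.g.
+# TestConfig(os='mac', version='leopard', architecture='x86',
+#            build_type='release', graphics_type='cpu').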
all_test_files.update(extra_test_files)
expectations = test_expectations.TestExpectations(
- self, all_test_files, expectations_str, test_platform_name,
- is_debug_mode, is_lint_mode=False, overrides=overrides_str)
+ self, all_test_files, expectations_str, self.test_configuration(),
+ is_lint_mode=False, overrides=overrides_str)
tests_dir = self.layout_tests_dir()
return [self.relative_test_filename(test)
for test in expectations.get_tests_with_result_type(test_expectations.SKIP)]
return
port.start_websocket_server()
port.stop_websocket_server()
+
+ def test_test_configuration(self):
+ port = self.make_port()
+ if not port:
+ return
+ self.assertTrue(port.test_configuration())
+
+ def test_all_test_configurations(self):
+ port = self.make_port()
+ if not port:
+ return
+ self.assertTrue(len(port.all_test_configurations()) > 0)
def check_build(self, needs_http):
return True
+ def default_configuration(self):
+ return 'Release'
+
def diff_image(self, expected_contents, actual_contents,
diff_filename=None):
diffed = actual_contents != expected_contents
return test_platform_name
def version(self):
- return ''
+ return '-leopard'
class TestDriver(base.Driver):
test_expectations.TestExpectations(self._rebaseline_port,
None,
expectations_str,
- self._platform,
- False,
+ self._target_port.test_configuration(),
False)
self._scm = scm.default_scm()
return -1
raise
- printer.print_update("Parsing expectations ...")
if options.lint_test_files:
return runner.lint()
- runner.parse_expectations(port.test_platform_name(),
- options.configuration == 'Debug')
+
+ printer.print_update("Parsing expectations ...")
+ runner.parse_expectations()
printer.print_update("Checking build ...")
if not port.check_build(runner.needs_http()):
"Using 'test' port, but platform-specific expectations "
"will fail the check." % self._file_path)
self._port_obj = port.get('test')
- self._port_to_check = self._port_obj.test_platform_name()
# Suppress error messages of test_expectations module since they will be
# reported later.
log = logging.getLogger("webkitpy.layout_tests.layout_package."
try:
expectations = test_expectations.TestExpectationsFile(
port=self._port_obj, expectations=expectations_str, full_test_list=tests,
- test_platform_name=self._port_to_check, is_debug_mode=False,
+ test_config=self._port_obj.test_configuration(),
is_lint_mode=True, overrides=overrides)
except test_expectations.ParseError, error:
err = error
self.assertEqual(expected, self._error_collector.get_errors())
def test_valid_expectations(self):
- self.assert_lines_lint(
- ["passes/text.html = PASS"],
- "")
- self.assert_lines_lint(
- ["passes/text.html = FAIL PASS"],
- "")
- self.assert_lines_lint(
- ["passes/text.html = CRASH TIMEOUT FAIL PASS"],
- "")
self.assert_lines_lint(
["BUGCR1234 MAC : passes/text.html = PASS FAIL"],
"")
def test_modifier_errors(self):
self.assert_lines_lint(
["BUG1234 : passes/text.html = FAIL"],
- 'Bug must be either BUGCR, BUGWK, or BUGV8_ for test: bug1234 passes/text.html [test/expectations] [5]')
+ "BUG\\d+ is not allowed, must be one of BUGCR\\d+, BUGWK\\d+, BUGV8_\\d+, or a non-numeric bug identifier. passes/text.html [test/expectations] [5]")
def test_valid_modifiers(self):
self.assert_lines_lint(
["INVALID-MODIFIER : passes/text.html = PASS"],
- "Invalid modifier for test: invalid-modifier "
+ "Unrecognized option 'invalid-modifier' "
"passes/text.html [test/expectations] [5]")
self.assert_lines_lint(
["SKIP : passes/text.html = PASS"],
def test_expectation_errors(self):
self.assert_lines_lint(
["missing expectations"],
- "Missing expectations. ['missing expectations'] [test/expectations] [5]")
+ "Missing a ':' missing expectations [test/expectations] [5]")
self.assert_lines_lint(
["SLOW : passes/text.html = TIMEOUT"],
"A test can not be both slow and timeout. "
"If it times out indefinitely, then it should be just timeout. "
"passes/text.html [test/expectations] [5]")
self.assert_lines_lint(
- ["does/not/exist.html = FAIL"],
+ ["BUGWK1 : does/not/exist.html = FAIL"],
"Path does not exist. does/not/exist.html [test/expectations] [2]")
def test_parse_expectations(self):
self.assert_lines_lint(
- ["passes/text.html = PASS"],
+ ["BUGWK1 : passes/text.html = PASS"],
"")
self.assert_lines_lint(
- ["passes/text.html = UNSUPPORTED"],
+ ["BUGWK1 : passes/text.html = UNSUPPORTED"],
"Unsupported expectation: unsupported "
"passes/text.html [test/expectations] [5]")
self.assert_lines_lint(
- ["passes/text.html = PASS UNSUPPORTED"],
+ ["BUGWK1 : passes/text.html = PASS UNSUPPORTED"],
"Unsupported expectation: unsupported "
"passes/text.html [test/expectations] [5]")
def test_already_seen_test(self):
self.assert_lines_lint(
- ["passes/text.html = PASS",
- "passes/text.html = TIMEOUT"],
- "Duplicate expectation. %s [test/expectations] [5]" % self._test_file)
+ ["BUGWK1 : passes/text.html = PASS",
+ "BUGWK1 : passes/text.html = TIMEOUT"],
+ "Duplicate or ambiguous expectation. %s [test/expectations] [5]" % self._test_file)
def test_tab(self):
self.assert_lines_lint(
- ["\tpasses/text.html = PASS"],
+ ["\tBUGWK1 : passes/text.html = PASS"],
"Line contains tab character. [whitespace/tab] [5]")
if __name__ == '__main__':