+2010-05-18 Dirk Pranke <dpranke@chromium.org>
+
+ Reviewed by Ojan Vafai.
+
+ new-run-webkit-tests: implement a '--reset-results' flag to complement
+ the '--new-baseline' flag. '--new-baseline' always writes the results
+ into the platform directory; '--reset-results' updates the existing
+ baseline wherever it happens to be. Each behavior is useful in
+ different circumstances.
+
+ https://bugs.webkit.org/show_bug.cgi?id=38879
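+
+ For example (illustrative usage only; foo.html stands in for any layout test):
+
+ # '--new-baseline' writes foo-expected.* into the platform directory,
+ # even if the current baseline lives elsewhere:
+ new-run-webkit-tests --new-baseline foo.html
+
+ # '--reset-results' overwrites the baseline wherever it currently
+ # resolves, generic or platform-specific:
+ new-run-webkit-tests --reset-results foo.html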
+
+ * Scripts/webkitpy/layout_tests/data/image/canvas-bg.html: Added.
+ * Scripts/webkitpy/layout_tests/data/image/canvas-zoom-expected.checksum: Added.
+ * Scripts/webkitpy/layout_tests/data/image/canvas-zoom-expected.png: Added.
+ * Scripts/webkitpy/layout_tests/data/image/canvas-zoom-expected.txt: Added.
+ * Scripts/webkitpy/layout_tests/data/image/canvas-zoom.html: Added.
+ * Scripts/webkitpy/layout_tests/data/misc/crash-expected.txt: Added.
+ * Scripts/webkitpy/layout_tests/data/misc/crash.html: Added.
+ * Scripts/webkitpy/layout_tests/data/misc/missing-expectation.html: Added.
+ * Scripts/webkitpy/layout_tests/data/misc/passing-expected.txt: Added.
+ * Scripts/webkitpy/layout_tests/data/misc/passing.html: Added.
+ * Scripts/webkitpy/layout_tests/data/platform/test/image/canvas-bg-expected.checksum: Added.
+ * Scripts/webkitpy/layout_tests/data/platform/test/image/canvas-bg-expected.png: Added.
+ * Scripts/webkitpy/layout_tests/data/platform/test/image/canvas-bg-expected.txt: Added.
+ * Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt: Added.
+ * Scripts/webkitpy/layout_tests/data/text/article-element-expected.txt: Added.
+ * Scripts/webkitpy/layout_tests/data/text/article-element.html: Added.
+ * Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py:
+ * Scripts/webkitpy/layout_tests/port/test.py:
+ * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
+ * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
+ * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
+ * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+ * Scripts/webkitpy/layout_tests/test_types/image_diff.py:
+ * Scripts/webkitpy/layout_tests/test_types/test_type_base.py:
+ * Scripts/webkitpy/layout_tests/test_types/text_diff.py:
+ * Scripts/webkitpy/layout_tests/port/dryrun.py:
+
2010-05-18 Eric Seidel <eric@webkit.org>
Reviewed by Adam Roben.
--- /dev/null
+<html>
+ <head>
+ <style>
+ div { background: -webkit-canvas(squares); width:600px; height:600px; border:2px solid black }
+ </style>
+
+ <script type="application/x-javascript">
+function draw(w, h) {
+ var ctx = document.getCSSCanvasContext("2d", "squares", w, h);
+
+ ctx.fillStyle = "rgb(200,0,0)";
+ ctx.fillRect (10, 10, 55, 50);
+
+ ctx.fillStyle = "rgba(0, 0, 200, 0.5)";
+ ctx.fillRect (30, 30, 55, 50);
+}
+ </script>
+ </head>
+ <body onload="draw(300, 300)">
+ <div></div>
+ </body>
+</html>
\ No newline at end of file
--- /dev/null
+afa0f2d246120c180005d67d47636b92
\ No newline at end of file
--- /dev/null
+layer at (0,0) size 800x600
+ RenderView at (0,0) size 800x600
+layer at (0,0) size 800x600
+ RenderBlock {HTML} at (0,0) size 800x600
+ RenderBody {BODY} at (8,8) size 784x584
+ RenderBlock {P} at (0,0) size 784x18
+ RenderText {#text} at (0,0) size 624x18
+ text run at (0,0) width 624: "These should be four green hollow boxes with dimensions 600x300, 100x300, 600x100, 100x100."
+ RenderBlock (anonymous) at (0,34) size 784x420
+ RenderHTMLCanvas {CANVAS} at (0,0) size 606x306 [border: (3px solid #008000)]
+ RenderText {#text} at (606,292) size 4x18
+ text run at (606,292) width 4: " "
+ RenderText {#text} at (0,0) size 0x0
+ RenderHTMLCanvas {CANVAS} at (610,0) size 106x306 [border: (3px solid #008000)]
+ RenderText {#text} at (0,0) size 0x0
+ RenderText {#text} at (0,0) size 0x0
+ RenderHTMLCanvas {CANVAS} at (0,310) size 606x106 [border: (3px solid #008000)]
+ RenderText {#text} at (606,402) size 4x18
+ text run at (606,402) width 4: " "
+ RenderText {#text} at (0,0) size 0x0
+ RenderHTMLCanvas {CANVAS} at (610,310) size 106x106 [border: (3px solid #008000)]
+ RenderText {#text} at (0,0) size 0x0
--- /dev/null
+<style>
+ canvas { border: solid green;
+ zoom: 2; }
+</style>
+<p>
+ These should be four green hollow boxes with dimensions 600x300, 100x300, 600x100, 100x100.
+</p>
+<!-- 300x150 -->
+<canvas id="canvas"></canvas>
+<!-- 50x150 -->
+<canvas id="canvas" width="50"></canvas>
+<!-- 300x50 -->
+<canvas id="canvas" height="50"></canvas>
+<!-- 50x50 -->
+<canvas id="canvas" width="50" height="50"></canvas>
--- /dev/null
+This test is expected to crash.
--- /dev/null
+<html>
+<body>
+This test is expected to crash.
+</body>
+</html>
--- /dev/null
+<html>
+<body>
+This test intentionally doesn't have an expected result checked in.
+</body>
+</html>
--- /dev/null
+This test is expected to pass.
--- /dev/null
+<html>
+<body>
+This test is expected to pass.
+</body>
+</html>
--- /dev/null
+790b681a41697634fcf2a2587afb89c6
\ No newline at end of file
--- /dev/null
+layer at (0,0) size 785x620
+ RenderView at (0,0) size 785x600
+layer at (0,0) size 785x620
+ RenderBlock {HTML} at (0,0) size 785x620
+ RenderBody {BODY} at (8,8) size 769x604
+ RenderBlock {DIV} at (0,0) size 604x604 [border: (2px solid #000000)]
--- /dev/null
+WONTFIX : misc/missing-expectation.html = MISSING PASS
--- /dev/null
+Various tests for the article element.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+
+<article> closes <p>:
+PASS article1.parentNode.nodeName == "p" is false
+<p> does not close <article>:
+PASS p1.parentNode.nodeName is "ARTICLE"
+<article> can be nested inside <article>:
+PASS article3.parentNode.id is "article2"
+Residual style:
+PASS getWeight("article4") is "bold"
+PASS getWeight("span1") is "bold"
+FormatBlock:
+PASS document.getElementById("span2").parentNode.nodeName is "ARTICLE"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
--- /dev/null
+<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
+<html>
+<head>
+<link rel="stylesheet" href="../../fast/js/resources/js-test-style.css">
+<script src="../../fast/js/resources/js-test-pre.js"></script>
+</head>
+<body>
+<p id="description"></p>
+<div id="console"></div>
+<script src="script-tests/article-element.js"></script>
+<script src="../../fast/js/resources/js-test-post.js"></script>
+</body>
+</html>
# are generating a new baseline. (Otherwise, an image from a
# previous run will be copied into the baseline.)
image_hash = test_info.image_hash()
- if image_hash and self._test_args.new_baseline:
+ if (image_hash and
+ (self._test_args.new_baseline or self._test_args.reset_results)):
image_hash = ""
start = time.time()
crash, timeout, actual_checksum, output, error = \
def test_print_test_result(self):
- result = get_result('foo.html')
printer, err, out = self.get_printer(['--print', 'nothing'])
+ result = get_result(os.path.join(self._port.layout_tests_dir(),
+ 'foo.html'))
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
self.assertTrue(err.empty())
return os.path.join(self._port.layout_tests_dir(), test_name)
def get_basic_tests(self):
- return [self.get_test('fast/html/article-element.html'),
- self.get_test('fast/html/header-element.html'),
- self.get_test('fast/html/keygen.html'),
- self.get_test('fast/html/tab-order.html'),
- self.get_test('fast/events/space-scroll-event.html'),
- self.get_test('fast/events/tab-imagemap.html')]
+ return [self.get_test('text/article-element.html'),
+ self.get_test('image/canvas-bg.html'),
+ self.get_test('image/canvas-zoom.html'),
+ self.get_test('misc/crash.html'),
+ self.get_test('misc/passing.html')]
def get_basic_expectations(self):
return """
-BUG_TEST : fast/html/article-element.html = TEXT
-BUG_TEST SKIP : fast/html/keygen.html = CRASH
-BUG_TEST REBASELINE : fast/htmltab-order.html = MISSING
-BUG_TEST : fast/events = IMAGE
+BUG_TEST : text/article-element.html = TEXT
+BUG_TEST SKIP : misc/crash.html = CRASH
+BUG_TEST REBASELINE : misc/missing-expectation.html = MISSING
+BUG_TEST : image = IMAGE
"""
def parse_exp(self, expectations, overrides=None):
set([result]))
def test_basic(self):
- self.parse_exp(self.get_basic_expectations())
- self.assert_exp('fast/html/article-element.html', TEXT)
- self.assert_exp('fast/events/tab-imagemap.html', IMAGE)
- self.assert_exp('fast/html/header-element.html', PASS)
+ self.parse_exp(self.get_basic_expectations())
+ self.assert_exp('text/article-element.html', TEXT)
+ self.assert_exp('image/canvas-zoom.html', IMAGE)
+ self.assert_exp('misc/passing.html', PASS)
def test_duplicates(self):
- self.assertRaises(SyntaxError, self.parse_exp, """
-BUG_TEST : fast/html/article-element.html = TEXT
-BUG_TEST : fast/html/article-element.html = IMAGE""")
- self.assertRaises(SyntaxError, self.parse_exp,
- self.get_basic_expectations(), """
-BUG_TEST : fast/html/article-element.html = TEXT
-BUG_TEST : fast/html/article-element.html = IMAGE""")
+ self.assertRaises(SyntaxError, self.parse_exp, """
+BUG_TEST : text/article-element.html = TEXT
+BUG_TEST : text/article-element.html = IMAGE""")
+ self.assertRaises(SyntaxError, self.parse_exp,
+ self.get_basic_expectations(), """
+BUG_TEST : text/article-element.html = TEXT
+BUG_TEST : text/article-element.html = IMAGE""")
def test_overrides(self):
- self.parse_exp(self.get_basic_expectations(), """
-BUG_OVERRIDE : fast/html/article-element.html = IMAGE""")
- self.assert_exp('fast/html/article-element.html', IMAGE)
+ self.parse_exp(self.get_basic_expectations(), """
+BUG_OVERRIDE : text/article-element.html = IMAGE""")
+ self.assert_exp('text/article-element.html', IMAGE)
def test_matches_an_expected_result(self):
- def match(test, result, pixel_tests_enabled):
- return self._exp.matches_an_expected_result(
- self.get_test(test), result, pixel_tests_enabled)
+ def match(test, result, pixel_tests_enabled):
+ return self._exp.matches_an_expected_result(
+ self.get_test(test), result, pixel_tests_enabled)
- self.parse_exp(self.get_basic_expectations())
- self.assertTrue(match('fast/html/article-element.html', TEXT, True))
- self.assertTrue(match('fast/html/article-element.html', TEXT, False))
- self.assertFalse(match('fast/html/article-element.html', CRASH, True))
- self.assertFalse(match('fast/html/article-element.html', CRASH, False))
+ self.parse_exp(self.get_basic_expectations())
+ self.assertTrue(match('text/article-element.html', TEXT, True))
+ self.assertTrue(match('text/article-element.html', TEXT, False))
+ self.assertFalse(match('text/article-element.html', CRASH, True))
+ self.assertFalse(match('text/article-element.html', CRASH, False))
- self.assertTrue(match('fast/events/tab-imagemap.html', IMAGE, True))
- self.assertTrue(match('fast/events/tab-imagemap.html', PASS, False))
+ self.assertTrue(match('image/canvas-bg.html', IMAGE, True))
+ self.assertTrue(match('image/canvas-bg.html', PASS, False))
- self.assertTrue(match('fast/html/keygen.html', SKIP, False))
- self.assertTrue(match('fast/html/tab-order.html', PASS, False))
+ self.assertTrue(match('misc/crash.html', SKIP, False))
+ self.assertTrue(match('misc/passing.html', PASS, False))
if __name__ == '__main__':
unittest.main()
text_filename = self._port.expected_filename(test_name, '.txt')
text_output = _read_file(text_filename)
- if image_hash:
+ if image_hash is not None:
image_filename = self._port.expected_filename(test_name, '.png')
image = _read_file(image_filename, 'rb')
if self._image_path:
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Dummy Port implementation used for testing."""
+from __future__ import with_statement
+import codecs
import os
import time
return ('test',)
def baseline_path(self):
- curdir = os.path.abspath(__file__)
- self.topdir = curdir[0:curdir.index("WebKitTools")]
- return os.path.join(self.topdir, 'LayoutTests', 'platform', 'test')
+ return os.path.join(self.layout_tests_dir(), 'platform',
+ self.name())
def baseline_search_path(self):
return [self.baseline_path()]
expected_filename, actual_filename):
return ''
- def relative_test_filename(self, filename):
- return filename
-
- def expected_filename(self, filename, suffix):
- (basename, ext) = os.path.splitext(filename)
- return basename + '.' + suffix
+ def layout_tests_dir(self):
+ return self.path_from_webkit_base('WebKitTools', 'Scripts',
+ 'webkitpy', 'layout_tests', 'data')
def name(self):
return self._name
def options(self):
return self._options
+ def path_to_test_expectations_file(self):
+ return self.path_from_webkit_base('WebKitTools', 'Scripts',
+ 'webkitpy', 'layout_tests', 'data', 'platform', 'test',
+ 'test_expectations.txt')
+
def results_directory(self):
return '/tmp/' + self._options.results_directory
pass
def test_expectations(self):
- return ''
+ """Returns the test expectations for this port.
+
+ Basically this string should contain the equivalent of a
+ test_expectations file. See test_expectations.py for more details."""
+ expectations_path = self.path_to_test_expectations_file()
+ with codecs.open(expectations_path, "r", "utf-8") as file:
+ return file.read()
def test_base_platform_names(self):
return ('test',)
self._driver_options = test_driver_options
self._image_path = image_path
self._port = port
+ self._image_written = False
def poll(self):
return True
return 0
def run_test(self, uri, timeoutms, image_hash):
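+ # Write a dummy (deliberately invalid) image file once when pixel
+ # tests are enabled, so that code saving new image baselines has a
+ # file at image_path to read.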
+ if not self._image_written and self._port._options.pixel_tests:
+ with open(self._image_path, "w") as f:
+ f.write("bad png file from TestDriver")
+ self._image_written = True
+
+ # We special-case this because we can't fake an image hash for a
+ # missing expectation.
+ if uri.find('misc/missing-expectation') != -1:
+ return (False, False, 'deadbeefdeadbeefdeadbeefdeadbeef', '', None)
return (False, False, image_hash, '', None)
def start(self):
test_args.png_path = png_path
test_args.new_baseline = self._options.new_baseline
+ test_args.reset_results = self._options.reset_results
test_args.show_sources = self._options.sources
default=False, help="Save all generated results as new baselines "
"into the platform directory, overwriting whatever's "
"already there."),
+ optparse.make_option("--reset-results", action="store_true",
+ default=False, help="Reset any existing baselines to the "
+ "generated results"),
optparse.make_option("--no-show-results", action="store_false",
default=True, dest="show_results",
help="Don't launch a browser with results after the tests "
"""Unit tests for run_webkit_tests."""
+import codecs
import os
import sys
import unittest
class MainTest(unittest.TestCase):
def test_fast(self):
+ self.assertTrue(passing_run(['--platform', 'test']))
+ self.assertTrue(passing_run(['--platform', 'test', '--run-singly']))
self.assertTrue(passing_run(['--platform', 'test',
- 'fast/html']))
- self.assertTrue(passing_run(['--platform', 'test',
- '--run-singly',
- 'fast/html']))
- self.assertTrue(passing_run(['--platform', 'test',
- 'fast/html/article-element.html']))
+ 'text/article-element.html']))
self.assertTrue(passing_run(['--platform', 'test',
'--child-processes', '1',
- '--print', 'unexpected',
- 'fast/html']))
+ '--print', 'unexpected']))
def test_child_processes(self):
(res, buildbot_output, regular_output) = logging_run(
['--platform', 'test', '--print', 'config', '--child-processes',
- '1', 'fast/html'])
+ '1'])
self.assertTrue('Running one DumpRenderTree\n'
in regular_output.get())
(res, buildbot_output, regular_output) = logging_run(
['--platform', 'test', '--print', 'config', '--child-processes',
- '2', 'fast/html'])
+ '2'])
self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
in regular_output.get())
def test_last_results(self):
- passing_run(['--platform', 'test', 'fast/html'])
+ passing_run(['--platform', 'test'])
(res, buildbot_output, regular_output) = logging_run(
['--platform', 'test', '--print-last-failures'])
self.assertEqual(regular_output.get(), ['\n\n'])
self.assertEqual(buildbot_output.get(), [])
+def _mocked_open(original_open, file_list):
+ def _wrapper(name, mode, encoding):
+ if name.find("-expected.") != -1 and mode == "w":
+ # we don't want to actually write new baselines, so stub these out
+ name = name.replace('\\', '/')
+ file_list.append(name)
+ return original_open(os.devnull, mode, encoding)
+ return original_open(name, mode, encoding)
+ return _wrapper
+
+
+class RebaselineTest(unittest.TestCase):
+ def assertBaselines(self, file_list, file):
+ "assert that the file_list contains the baselines."""
+ for ext in [".txt", ".png", ".checksum"]:
+ baseline = file + "-expected" + ext
+ self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
+
+ def test_reset_results(self):
+ file_list = []
+ original_open = codecs.open
+ try:
+ # Test that we update expectations in place. If the expectation
+ # is missing, a new one is written to the generic location.
+ file_list = []
+ codecs.open = _mocked_open(original_open, file_list)
+ passing_run(['--platform', 'test', '--pixel-tests',
+ '--reset-results',
+ 'image/canvas-bg.html',
+ 'image/canvas-zoom.html',
+ 'misc/missing-expectation.html'])
+ self.assertEqual(len(file_list), 9)
+ self.assertBaselines(file_list,
+ "data/image/canvas-zoom")
+ self.assertBaselines(file_list,
+ "data/platform/test/image/canvas-bg")
+ self.assertBaselines(file_list,
+ "data/misc/missing-expectation")
+ finally:
+ codecs.open = original_open
+
+ def test_new_baseline(self):
+ file_list = []
+ original_open = codecs.open
+ try:
+ # Test that we update the platform expectations. If the expectation
+ # is missing, then create a new expectation in the platform dir.
+ file_list = []
+ codecs.open = _mocked_open(original_open, file_list)
+ passing_run(['--platform', 'test', '--pixel-tests',
+ '--new-baseline',
+ 'image/canvas-zoom.html',
+ 'image/canvas-bg.html',
+ 'misc/missing-expectation.html'])
+ self.assertEqual(len(file_list), 9)
+ self.assertBaselines(file_list,
+ "data/platform/test/image/canvas-zoom")
+ self.assertBaselines(file_list,
+ "data/platform/test/image/canvas-bg")
+ self.assertBaselines(file_list,
+ "data/platform/test/misc/missing-expectation")
+ finally:
+ codecs.open = original_open
+
class TestRunnerTest(unittest.TestCase):
def test_results_html(self):
mock_port = Mock()
if errno.ENOENT != e.errno:
raise
- def _save_baseline_files(self, filename, png_path, checksum):
+ def _save_baseline_files(self, filename, png_path, checksum,
+ generate_new_baseline):
"""Saves new baselines for the PNG and checksum.
Args:
filename: test filename
png_path: path to the actual PNG result file
checksum: value of the actual checksum result
+ generate_new_baseline: whether to generate a new, platform-specific
+ baseline, or update the existing one
"""
with open(png_path, "rb") as png_file:
png_data = png_file.read()
- self._save_baseline_data(filename, png_data, ".png", encoding=None)
- self._save_baseline_data(filename, checksum, ".checksum", encoding="ascii")
+ self._save_baseline_data(filename, png_data, ".png", encoding=None,
+ generate_new_baseline=generate_new_baseline)
+ self._save_baseline_data(filename, checksum, ".checksum",
+ encoding="ascii",
+ generate_new_baseline=generate_new_baseline)
def _create_image_diff(self, port, filename, configuration):
"""Creates the visual diff of the expected/actual PNGs.
return failures
# If we're generating a new baseline, we pass.
- if test_args.new_baseline:
+ if test_args.new_baseline or test_args.reset_results:
self._save_baseline_files(filename, test_args.png_path,
- test_args.hash)
+ test_args.hash, test_args.new_baseline)
return failures
# Compare hashes.
self._port.relative_test_filename(filename))
self._port.maybe_make_directory(os.path.split(output_filename)[0])
- def _save_baseline_data(self, filename, data, modifier, encoding):
+ def _save_baseline_data(self, filename, data, modifier, encoding,
+ generate_new_baseline=True):
"""Saves a new baseline file into the port's baseline directory.
The file will be named simply "<test>-expected<modifier>", suitable for
filename: path to the test file
data: result to be saved as the new baseline
modifier: type of the result file, e.g. ".txt" or ".png"
+ encoding: file encoding (None, "utf-8", etc.)
+ generate_new_baseline: whether to generate a new, platform-specific
+ baseline, or update the existing one
"""
- relative_dir = os.path.dirname(
- self._port.relative_test_filename(filename))
- baseline_path = self._port.baseline_path()
- output_dir = os.path.join(baseline_path, relative_dir)
- output_file = os.path.basename(os.path.splitext(filename)[0] +
- self.FILENAME_SUFFIX_EXPECTED + modifier)
+ if generate_new_baseline:
+ relative_dir = os.path.dirname(
+ self._port.relative_test_filename(filename))
+ baseline_path = self._port.baseline_path()
+ output_dir = os.path.join(baseline_path, relative_dir)
+ output_file = os.path.basename(os.path.splitext(filename)[0] +
+ self.FILENAME_SUFFIX_EXPECTED + modifier)
+ self._port.maybe_make_directory(output_dir)
+ output_path = os.path.join(output_dir, output_file)
+ _log.debug('writing new baseline result "%s"' % (output_path))
+ else:
+ output_path = self._port.expected_filename(filename, modifier)
+ _log.debug('resetting baseline result "%s"' % output_path)
- self._port.maybe_make_directory(output_dir)
- output_path = os.path.join(output_dir, output_file)
- _log.debug('writing new baseline to "%s"' % (output_path))
self._write_into_file_at_path(output_path, data, encoding)
def output_filename(self, filename, modifier):
failures = []
# If we're generating a new baseline, we pass.
- if test_args.new_baseline:
+ if test_args.new_baseline or test_args.reset_results:
# Although all test_shell/DumpRenderTree output should be utf-8,
# we do not ever decode it inside run-webkit-tests. For some tests
# DumpRenderTree may not output utf-8 text (e.g. webarchives).
- self._save_baseline_data(filename, output, ".txt", encoding=None)
+ self._save_baseline_data(filename, output, ".txt", encoding=None,
+ generate_new_baseline=test_args.new_baseline)
return failures
# Normalize text to diff