Unreviewed. Update W3C WebDriver imported tests.
author     carlosgc@webkit.org <carlosgc@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
           Tue, 14 Aug 2018 07:08:15 +0000 (07:08 +0000)
committer  carlosgc@webkit.org <carlosgc@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
           Tue, 14 Aug 2018 07:08:15 +0000 (07:08 +0000)
Tools:

Bump pytest version to 3.6.2.

* Scripts/webkitpy/thirdparty/__init__.py:
(AutoinstallImportHook._install_pytest):

WebDriverTests:

* imported/w3c/importer.json:
* imported/w3c/tools/webdriver/README.md:
* imported/w3c/tools/webdriver/webdriver/__init__.py:
* imported/w3c/tools/webdriver/webdriver/client.py:
* imported/w3c/tools/webdriver/webdriver/error.py:
* imported/w3c/tools/webdriver/webdriver/protocol.py:
* imported/w3c/tools/webdriver/webdriver/transport.py:
* imported/w3c/tools/wptrunner/README.rst:
* imported/w3c/tools/wptrunner/docs/conf.py:
* imported/w3c/tools/wptrunner/docs/usage.rst:
* imported/w3c/tools/wptrunner/requirements.txt:
* imported/w3c/tools/wptrunner/requirements_chrome.txt:
* imported/w3c/tools/wptrunner/requirements_chrome_android.txt:
* imported/w3c/tools/wptrunner/requirements_edge.txt:
* imported/w3c/tools/wptrunner/requirements_firefox.txt:
* imported/w3c/tools/wptrunner/requirements_ie.txt:
* imported/w3c/tools/wptrunner/requirements_opera.txt:
* imported/w3c/tools/wptrunner/requirements_safari.txt:
* imported/w3c/tools/wptrunner/requirements_sauce.txt:
* imported/w3c/tools/wptrunner/tox.ini:
* imported/w3c/tools/wptrunner/wptrunner.default.ini:
* imported/w3c/tools/wptrunner/wptrunner/browsers/__init__.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/base.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/chrome.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/chrome_android.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/edge.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/fennec.py: Added.
* imported/w3c/tools/wptrunner/wptrunner/browsers/firefox.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/ie.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/opera.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/safari.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/sauce.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/sauce_setup/edge-prerun.bat:
* imported/w3c/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh:
* imported/w3c/tools/wptrunner/wptrunner/browsers/servo.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/servodriver.py:
* imported/w3c/tools/wptrunner/wptrunner/browsers/webkit.py:
* imported/w3c/tools/wptrunner/wptrunner/environment.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/__init__.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/base.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/executormarionette.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/executorsafari.py: Added.
* imported/w3c/tools/wptrunner/wptrunner/executors/executorselenium.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/executorservo.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/executorservodriver.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/protocol.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py:
* imported/w3c/tools/wptrunner/wptrunner/executors/reftest-wait_marionette.js:
* imported/w3c/tools/wptrunner/wptrunner/executors/testharness_webdriver.js:
* imported/w3c/tools/wptrunner/wptrunner/font.py:
* imported/w3c/tools/wptrunner/wptrunner/formatters.py:
* imported/w3c/tools/wptrunner/wptrunner/manifestexpected.py:
* imported/w3c/tools/wptrunner/wptrunner/manifestupdate.py:
* imported/w3c/tools/wptrunner/wptrunner/metadata.py:
* imported/w3c/tools/wptrunner/wptrunner/products.py:
* imported/w3c/tools/wptrunner/wptrunner/stability.py:
* imported/w3c/tools/wptrunner/wptrunner/testloader.py:
* imported/w3c/tools/wptrunner/wptrunner/testrunner.py:
* imported/w3c/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py:
* imported/w3c/tools/wptrunner/wptrunner/tests/test_chunker.py:
* imported/w3c/tools/wptrunner/wptrunner/tests/test_formatters.py: Added.
* imported/w3c/tools/wptrunner/wptrunner/tests/test_products.py:
* imported/w3c/tools/wptrunner/wptrunner/tests/test_stability.py: Added.
* imported/w3c/tools/wptrunner/wptrunner/tests/test_update.py:
* imported/w3c/tools/wptrunner/wptrunner/tests/test_wpttest.py: Added.
* imported/w3c/tools/wptrunner/wptrunner/update/__init__.py:
* imported/w3c/tools/wptrunner/wptrunner/update/metadata.py:
* imported/w3c/tools/wptrunner/wptrunner/update/sync.py:
* imported/w3c/tools/wptrunner/wptrunner/update/tree.py:
* imported/w3c/tools/wptrunner/wptrunner/update/update.py:
* imported/w3c/tools/wptrunner/wptrunner/vcs.py:
* imported/w3c/tools/wptrunner/wptrunner/webdriver_server.py:
* imported/w3c/tools/wptrunner/wptrunner/wptcommandline.py:
* imported/w3c/tools/wptrunner/wptrunner/wptlogging.py:
* imported/w3c/tools/wptrunner/wptrunner/wptmanifest/__init__.py:
* imported/w3c/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py:
* imported/w3c/tools/wptrunner/wptrunner/wptmanifest/parser.py:
* imported/w3c/tools/wptrunner/wptrunner/wptmanifest/serializer.py:
* imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py:
* imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py:
* imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py:
* imported/w3c/tools/wptrunner/wptrunner/wptrunner.py:
* imported/w3c/tools/wptrunner/wptrunner/wpttest.py:
* imported/w3c/webdriver/META.yml: Added.
* imported/w3c/webdriver/OWNERS: Removed.
* imported/w3c/webdriver/tests/accept_alert/accept.py:
* imported/w3c/webdriver/tests/actions/control_click.py: Added.
* imported/w3c/webdriver/tests/actions/key.py:
* imported/w3c/webdriver/tests/actions/modifier_click.py:
* imported/w3c/webdriver/tests/actions/mouse.py:
* imported/w3c/webdriver/tests/actions/support/keys.py:
* imported/w3c/webdriver/tests/add_cookie/add.py:
* imported/w3c/webdriver/tests/back/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/back/back.py: Added.
* imported/w3c/webdriver/tests/back/conftest.py: Added.
* imported/w3c/webdriver/tests/close_window/close.py:
* imported/w3c/webdriver/tests/close_window/user_prompts.py:
* imported/w3c/webdriver/tests/conftest.py:
* imported/w3c/webdriver/tests/delete_all_cookies/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/delete_all_cookies/delete.py: Added.
* imported/w3c/webdriver/tests/delete_cookie/delete.py:
* imported/w3c/webdriver/tests/delete_cookie/user_prompts.py:
* imported/w3c/webdriver/tests/delete_session/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/delete_session/delete.py: Added.
* imported/w3c/webdriver/tests/dismiss_alert/dismiss.py:
* imported/w3c/webdriver/tests/element_clear/clear.py:
* imported/w3c/webdriver/tests/element_click/click.py: Added.
* imported/w3c/webdriver/tests/element_click/file_upload.py: Added.
* imported/w3c/webdriver/tests/element_click/interactability.py: Added.
* imported/w3c/webdriver/tests/element_click/navigate.py: Added.
* imported/w3c/webdriver/tests/element_click/scroll_into_view.py: Added.
* imported/w3c/webdriver/tests/element_click/support/close_window.html: Added.
* imported/w3c/webdriver/tests/element_click/support/input.html: Added.
* imported/w3c/webdriver/tests/element_send_keys/__init__.py:
* imported/w3c/webdriver/tests/element_send_keys/conftest.py: Added.
* imported/w3c/webdriver/tests/element_send_keys/events.py: Added.
* imported/w3c/webdriver/tests/element_send_keys/file_upload.py: Added.
* imported/w3c/webdriver/tests/element_send_keys/form_controls.py:
* imported/w3c/webdriver/tests/element_send_keys/send_keys.py: Added.
* imported/w3c/webdriver/tests/element_send_keys/user_prompts.py: Added.
* imported/w3c/webdriver/tests/execute_async_script/collections.py:
* imported/w3c/webdriver/tests/execute_async_script/execute_async.py: Added.
* imported/w3c/webdriver/tests/execute_async_script/user_prompts.py:
* imported/w3c/webdriver/tests/execute_script/cyclic.py:
* imported/w3c/webdriver/tests/execute_script/execute.py: Added.
* imported/w3c/webdriver/tests/execute_script/user_prompts.py:
* imported/w3c/webdriver/tests/find_element/find.py:
* imported/w3c/webdriver/tests/find_element_from_element/find.py:
* imported/w3c/webdriver/tests/find_elements/find.py:
* imported/w3c/webdriver/tests/find_elements_from_element/find.py:
* imported/w3c/webdriver/tests/forward/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/forward/conftest.py: Added.
* imported/w3c/webdriver/tests/forward/forward.py: Added.
* imported/w3c/webdriver/tests/forward/user_prompts.py: Added.
* imported/w3c/webdriver/tests/fullscreen_window/fullscreen.py:
* imported/w3c/webdriver/tests/fullscreen_window/user_prompts.py:
* imported/w3c/webdriver/tests/get_active_element/get.py:
* imported/w3c/webdriver/tests/get_alert_text/get.py:
* imported/w3c/webdriver/tests/get_current_url/get.py:
* imported/w3c/webdriver/tests/get_current_url/user_prompts.py:
* imported/w3c/webdriver/tests/get_element_attribute/get.py:
* imported/w3c/webdriver/tests/get_element_property/get.py:
* imported/w3c/webdriver/tests/get_element_property/user_prompts.py:
* imported/w3c/webdriver/tests/get_element_tag_name/get.py:
* imported/w3c/webdriver/tests/get_element_tag_name/user_prompts.py:
* imported/w3c/webdriver/tests/get_element_text/get.py:
* imported/w3c/webdriver/tests/get_named_cookie/get.py:
* imported/w3c/webdriver/tests/get_timeouts/get.py:
* imported/w3c/webdriver/tests/get_title/get.py:
* imported/w3c/webdriver/tests/get_title/user_prompts.py:
* imported/w3c/webdriver/tests/get_window_rect/get.py:
* imported/w3c/webdriver/tests/get_window_rect/user_prompts.py:
* imported/w3c/webdriver/tests/interface.html:
* imported/w3c/webdriver/tests/is_element_selected/selected.py:
* imported/w3c/webdriver/tests/is_element_selected/user_prompts.py:
* imported/w3c/webdriver/tests/maximize_window/maximize.py:
* imported/w3c/webdriver/tests/maximize_window/user_prompts.py:
* imported/w3c/webdriver/tests/minimize_window/minimize.py:
* imported/w3c/webdriver/tests/minimize_window/user_prompts.py:
* imported/w3c/webdriver/tests/navigate_to/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/navigate_to/navigate.py: Added.
* imported/w3c/webdriver/tests/new_session/conftest.py:
* imported/w3c/webdriver/tests/new_session/create_alwaysMatch.py:
* imported/w3c/webdriver/tests/new_session/create_firstMatch.py:
* imported/w3c/webdriver/tests/new_session/default_values.py:
* imported/w3c/webdriver/tests/new_session/invalid_capabilities.py:
* imported/w3c/webdriver/tests/new_session/merge.py:
* imported/w3c/webdriver/tests/new_session/page_load_strategy.py: Added.
* imported/w3c/webdriver/tests/new_session/platform_name.py: Added.
* imported/w3c/webdriver/tests/new_session/response.py:
* imported/w3c/webdriver/tests/new_session/timeouts.py: Added.
* imported/w3c/webdriver/tests/page_source/source.py:
* imported/w3c/webdriver/tests/refresh/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/refresh/refresh.py: Added.
* imported/w3c/webdriver/tests/refresh/user_prompts.py: Added.
* imported/w3c/webdriver/tests/send_alert_text/send.py:
* imported/w3c/webdriver/tests/set_timeouts/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/set_timeouts/set.py: Added.
* imported/w3c/webdriver/tests/set_window_rect/resizing_and_positioning.py: Removed.
* imported/w3c/webdriver/tests/set_window_rect/set.py:
* imported/w3c/webdriver/tests/set_window_rect/user_prompts.py:
* imported/w3c/webdriver/tests/status/status.py:
* imported/w3c/webdriver/tests/support/__init__.py:
* imported/w3c/webdriver/tests/support/asserts.py:
* imported/w3c/webdriver/tests/support/fixtures.py:
* imported/w3c/webdriver/tests/switch_to_frame/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/switch_to_frame/switch.py: Added.
* imported/w3c/webdriver/tests/switch_to_parent_frame/switch.py:
* imported/w3c/webdriver/tests/switch_to_window/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
* imported/w3c/webdriver/tests/switch_to_window/switch.py: Added.

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@234839 268f45cc-cd09-0410-ab3c-d52691b4dbfc

193 files changed:
Tools/ChangeLog
Tools/Scripts/webkitpy/thirdparty/__init__.py
WebDriverTests/ChangeLog
WebDriverTests/imported/w3c/importer.json
WebDriverTests/imported/w3c/tools/webdriver/README.md
WebDriverTests/imported/w3c/tools/webdriver/webdriver/__init__.py
WebDriverTests/imported/w3c/tools/webdriver/webdriver/client.py
WebDriverTests/imported/w3c/tools/webdriver/webdriver/error.py
WebDriverTests/imported/w3c/tools/webdriver/webdriver/protocol.py
WebDriverTests/imported/w3c/tools/webdriver/webdriver/transport.py
WebDriverTests/imported/w3c/tools/wptrunner/README.rst
WebDriverTests/imported/w3c/tools/wptrunner/docs/conf.py
WebDriverTests/imported/w3c/tools/wptrunner/docs/usage.rst
WebDriverTests/imported/w3c/tools/wptrunner/requirements.txt
WebDriverTests/imported/w3c/tools/wptrunner/requirements_chrome.txt
WebDriverTests/imported/w3c/tools/wptrunner/requirements_chrome_android.txt
WebDriverTests/imported/w3c/tools/wptrunner/requirements_edge.txt
WebDriverTests/imported/w3c/tools/wptrunner/requirements_firefox.txt
WebDriverTests/imported/w3c/tools/wptrunner/requirements_ie.txt
WebDriverTests/imported/w3c/tools/wptrunner/requirements_opera.txt
WebDriverTests/imported/w3c/tools/wptrunner/requirements_safari.txt
WebDriverTests/imported/w3c/tools/wptrunner/requirements_sauce.txt
WebDriverTests/imported/w3c/tools/wptrunner/tox.ini
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner.default.ini
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/__init__.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/base.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/chrome.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/chrome_android.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/edge.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/fennec.py [new file with mode: 0644]
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/firefox.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/ie.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/opera.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/safari.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/sauce.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/sauce_setup/edge-prerun.bat
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/servo.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/servodriver.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/webkit.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/environment.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/__init__.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/base.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/executormarionette.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/executorsafari.py [new file with mode: 0644]
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/executorselenium.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/executorservo.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/executorservodriver.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/protocol.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/reftest-wait_marionette.js
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/testharness_webdriver.js
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/font.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/formatters.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/manifestexpected.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/manifestupdate.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/metadata.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/products.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/stability.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/testloader.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/testrunner.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_chunker.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_formatters.py [new file with mode: 0644]
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_products.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_stability.py [new file with mode: 0644]
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_update.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_wpttest.py [new file with mode: 0644]
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/update/__init__.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/update/metadata.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/update/sync.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/update/tree.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/update/update.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/vcs.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/webdriver_server.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptcommandline.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptlogging.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptmanifest/__init__.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptmanifest/parser.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptmanifest/serializer.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wptrunner.py
WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/wpttest.py
WebDriverTests/imported/w3c/webdriver/META.yml [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/OWNERS [deleted file]
WebDriverTests/imported/w3c/webdriver/tests/accept_alert/accept.py
WebDriverTests/imported/w3c/webdriver/tests/actions/control_click.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/actions/key.py
WebDriverTests/imported/w3c/webdriver/tests/actions/modifier_click.py
WebDriverTests/imported/w3c/webdriver/tests/actions/mouse.py
WebDriverTests/imported/w3c/webdriver/tests/actions/support/keys.py
WebDriverTests/imported/w3c/webdriver/tests/add_cookie/add.py
WebDriverTests/imported/w3c/webdriver/tests/back/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/back/back.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/back/conftest.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/close_window/close.py
WebDriverTests/imported/w3c/webdriver/tests/close_window/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/conftest.py
WebDriverTests/imported/w3c/webdriver/tests/delete_all_cookies/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/delete_all_cookies/delete.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/delete_cookie/delete.py
WebDriverTests/imported/w3c/webdriver/tests/delete_cookie/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/delete_session/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/delete_session/delete.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/dismiss_alert/dismiss.py
WebDriverTests/imported/w3c/webdriver/tests/element_clear/clear.py
WebDriverTests/imported/w3c/webdriver/tests/element_click/click.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_click/file_upload.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_click/interactability.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_click/navigate.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_click/scroll_into_view.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_click/support/close_window.html [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_click/support/input.html [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py
WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/conftest.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/events.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/file_upload.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/form_controls.py
WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/send_keys.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/user_prompts.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/execute_async_script/collections.py
WebDriverTests/imported/w3c/webdriver/tests/execute_async_script/execute_async.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/execute_async_script/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/execute_script/cyclic.py
WebDriverTests/imported/w3c/webdriver/tests/execute_script/execute.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/execute_script/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/find_element/find.py
WebDriverTests/imported/w3c/webdriver/tests/find_element_from_element/find.py
WebDriverTests/imported/w3c/webdriver/tests/find_elements/find.py
WebDriverTests/imported/w3c/webdriver/tests/find_elements_from_element/find.py
WebDriverTests/imported/w3c/webdriver/tests/forward/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/forward/conftest.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/forward/forward.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/forward/user_prompts.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/fullscreen_window/fullscreen.py
WebDriverTests/imported/w3c/webdriver/tests/fullscreen_window/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/get_active_element/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_alert_text/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_current_url/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_current_url/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/get_element_attribute/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_element_property/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_element_property/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/get_element_tag_name/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_element_tag_name/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/get_element_text/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_named_cookie/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_timeouts/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_title/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_title/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/get_window_rect/get.py
WebDriverTests/imported/w3c/webdriver/tests/get_window_rect/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/interface.html
WebDriverTests/imported/w3c/webdriver/tests/is_element_selected/selected.py
WebDriverTests/imported/w3c/webdriver/tests/is_element_selected/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/maximize_window/maximize.py
WebDriverTests/imported/w3c/webdriver/tests/maximize_window/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/minimize_window/minimize.py
WebDriverTests/imported/w3c/webdriver/tests/minimize_window/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/navigate_to/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/navigate_to/navigate.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/new_session/conftest.py
WebDriverTests/imported/w3c/webdriver/tests/new_session/create_alwaysMatch.py
WebDriverTests/imported/w3c/webdriver/tests/new_session/create_firstMatch.py
WebDriverTests/imported/w3c/webdriver/tests/new_session/default_values.py
WebDriverTests/imported/w3c/webdriver/tests/new_session/invalid_capabilities.py
WebDriverTests/imported/w3c/webdriver/tests/new_session/merge.py
WebDriverTests/imported/w3c/webdriver/tests/new_session/page_load_strategy.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/new_session/platform_name.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/new_session/response.py
WebDriverTests/imported/w3c/webdriver/tests/new_session/timeouts.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/page_source/source.py
WebDriverTests/imported/w3c/webdriver/tests/refresh/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/refresh/refresh.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/refresh/user_prompts.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/send_alert_text/send.py
WebDriverTests/imported/w3c/webdriver/tests/set_timeouts/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/set_timeouts/set.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/set_window_rect/resizing_and_positioning.py [deleted file]
WebDriverTests/imported/w3c/webdriver/tests/set_window_rect/set.py
WebDriverTests/imported/w3c/webdriver/tests/set_window_rect/user_prompts.py
WebDriverTests/imported/w3c/webdriver/tests/status/status.py
WebDriverTests/imported/w3c/webdriver/tests/support/__init__.py
WebDriverTests/imported/w3c/webdriver/tests/support/asserts.py
WebDriverTests/imported/w3c/webdriver/tests/support/fixtures.py
WebDriverTests/imported/w3c/webdriver/tests/switch_to_frame/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/switch_to_frame/switch.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/switch_to_parent_frame/switch.py
WebDriverTests/imported/w3c/webdriver/tests/switch_to_window/__init__.py [new file with mode: 0644]
WebDriverTests/imported/w3c/webdriver/tests/switch_to_window/switch.py [new file with mode: 0644]

index 7512a15..02b765b 100644
@@ -1,3 +1,12 @@
+2018-08-13  Carlos Garcia Campos  <cgarcia@igalia.com>
+
+        Unreviewed. Update W3C WebDriver imported tests.
+
+        Bump pytest version to 3.6.2.
+
+        * Scripts/webkitpy/thirdparty/__init__.py:
+        (AutoinstallImportHook._install_pytest):
+
 2018-08-13  Thomas Denney  <tdenney@apple.com>
 
         Allow the substring 'me' in contributor names and email addresses
index 6ca117d..0509146 100644
@@ -144,10 +144,10 @@ class AutoinstallImportHook(object):
     def _install_pytest(self):
         self._install("https://pypi.python.org/packages/90/e3/e075127d39d35f09a500ebb4a90afd10f9ef0a1d28a6d09abeec0e444fdd/py-1.5.2.tar.gz#md5=279ca69c632069e1b71e11b14641ca28",
                               "py-1.5.2/py")
-        self._install("https://pypi.python.org/packages/1f/f8/8cd74c16952163ce0db0bd95fdd8810cbf093c08be00e6e665ebf0dc3138/pytest-3.2.5.tar.gz#md5=6dbe9bb093883f75394a689a1426ac6f",
-                              "pytest-3.2.5/_pytest")
-        self._install("https://pypi.python.org/packages/1f/f8/8cd74c16952163ce0db0bd95fdd8810cbf093c08be00e6e665ebf0dc3138/pytest-3.2.5.tar.gz#md5=6dbe9bb093883f75394a689a1426ac6f",
-                              "pytest-3.2.5/pytest.py")
+        self._install("https://pypi.python.org/packages/a2/ec/415d0cccc1ed41cd7fdf69ad989da16a8d13057996371004cab4bafc48f3/pytest-3.6.2.tar.gz",
+                              "pytest-3.6.2/src/_pytest")
+        self._install("https://pypi.python.org/packages/a2/ec/415d0cccc1ed41cd7fdf69ad989da16a8d13057996371004cab4bafc48f3/pytest-3.6.2.tar.gz",
+                              "pytest-3.6.2/src/pytest.py")
 
     def _install_pylint(self):
         self._ensure_autoinstalled_dir_is_in_sys_path()
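
The path change reflects pytest's move to a src/ layout: the hook now unpacks pytest-3.6.2/src/_pytest and pytest-3.6.2/src/pytest.py instead of the old top-level pytest-3.2.5 paths. As a rough usage sketch, assuming AutoinstallImportHook follows Python's standard sys.meta_path finder protocol (the actual registration in webkitpy may differ):

    import sys

    from webkitpy.thirdparty import AutoinstallImportHook

    # Registering the hook makes the first "import pytest" trigger
    # _install_pytest(), which downloads the pinned 3.6.2 tarball and
    # installs the packages listed above into the autoinstalled directory.
    sys.meta_path.append(AutoinstallImportHook())

    import pytest
    print(pytest.__version__)  # expected: 3.6.2
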
index 0b50ae6..3de959d 100644
@@ -1,5 +1,200 @@
 2018-08-13  Carlos Garcia Campos  <cgarcia@igalia.com>
 
+        Unreviewed. Update W3C WebDriver imported tests.
+
+        * imported/w3c/importer.json:
+        * imported/w3c/tools/webdriver/README.md:
+        * imported/w3c/tools/webdriver/webdriver/__init__.py:
+        * imported/w3c/tools/webdriver/webdriver/client.py:
+        * imported/w3c/tools/webdriver/webdriver/error.py:
+        * imported/w3c/tools/webdriver/webdriver/protocol.py:
+        * imported/w3c/tools/webdriver/webdriver/transport.py:
+        * imported/w3c/tools/wptrunner/README.rst:
+        * imported/w3c/tools/wptrunner/docs/conf.py:
+        * imported/w3c/tools/wptrunner/docs/usage.rst:
+        * imported/w3c/tools/wptrunner/requirements.txt:
+        * imported/w3c/tools/wptrunner/requirements_chrome.txt:
+        * imported/w3c/tools/wptrunner/requirements_chrome_android.txt:
+        * imported/w3c/tools/wptrunner/requirements_edge.txt:
+        * imported/w3c/tools/wptrunner/requirements_firefox.txt:
+        * imported/w3c/tools/wptrunner/requirements_ie.txt:
+        * imported/w3c/tools/wptrunner/requirements_opera.txt:
+        * imported/w3c/tools/wptrunner/requirements_safari.txt:
+        * imported/w3c/tools/wptrunner/requirements_sauce.txt:
+        * imported/w3c/tools/wptrunner/tox.ini:
+        * imported/w3c/tools/wptrunner/wptrunner.default.ini:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/__init__.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/base.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/chrome.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/chrome_android.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/edge.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/fennec.py: Added.
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/firefox.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/ie.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/opera.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/safari.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/sauce.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/sauce_setup/edge-prerun.bat:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/servo.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/servodriver.py:
+        * imported/w3c/tools/wptrunner/wptrunner/browsers/webkit.py:
+        * imported/w3c/tools/wptrunner/wptrunner/environment.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/__init__.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/base.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/executormarionette.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/executorsafari.py: Added.
+        * imported/w3c/tools/wptrunner/wptrunner/executors/executorselenium.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/executorservo.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/executorservodriver.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/protocol.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/reftest-wait_marionette.js:
+        * imported/w3c/tools/wptrunner/wptrunner/executors/testharness_webdriver.js:
+        * imported/w3c/tools/wptrunner/wptrunner/font.py:
+        * imported/w3c/tools/wptrunner/wptrunner/formatters.py:
+        * imported/w3c/tools/wptrunner/wptrunner/manifestexpected.py:
+        * imported/w3c/tools/wptrunner/wptrunner/manifestupdate.py:
+        * imported/w3c/tools/wptrunner/wptrunner/metadata.py:
+        * imported/w3c/tools/wptrunner/wptrunner/products.py:
+        * imported/w3c/tools/wptrunner/wptrunner/stability.py:
+        * imported/w3c/tools/wptrunner/wptrunner/testloader.py:
+        * imported/w3c/tools/wptrunner/wptrunner/testrunner.py:
+        * imported/w3c/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py:
+        * imported/w3c/tools/wptrunner/wptrunner/tests/test_chunker.py:
+        * imported/w3c/tools/wptrunner/wptrunner/tests/test_formatters.py: Added.
+        * imported/w3c/tools/wptrunner/wptrunner/tests/test_products.py:
+        * imported/w3c/tools/wptrunner/wptrunner/tests/test_stability.py: Added.
+        * imported/w3c/tools/wptrunner/wptrunner/tests/test_update.py:
+        * imported/w3c/tools/wptrunner/wptrunner/tests/test_wpttest.py: Added.
+        * imported/w3c/tools/wptrunner/wptrunner/update/__init__.py:
+        * imported/w3c/tools/wptrunner/wptrunner/update/metadata.py:
+        * imported/w3c/tools/wptrunner/wptrunner/update/sync.py:
+        * imported/w3c/tools/wptrunner/wptrunner/update/tree.py:
+        * imported/w3c/tools/wptrunner/wptrunner/update/update.py:
+        * imported/w3c/tools/wptrunner/wptrunner/vcs.py:
+        * imported/w3c/tools/wptrunner/wptrunner/webdriver_server.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptcommandline.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptlogging.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptmanifest/__init__.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptmanifest/parser.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptmanifest/serializer.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wptrunner.py:
+        * imported/w3c/tools/wptrunner/wptrunner/wpttest.py:
+        * imported/w3c/webdriver/META.yml: Added.
+        * imported/w3c/webdriver/OWNERS: Removed.
+        * imported/w3c/webdriver/tests/accept_alert/accept.py:
+        * imported/w3c/webdriver/tests/actions/control_click.py: Added.
+        * imported/w3c/webdriver/tests/actions/key.py:
+        * imported/w3c/webdriver/tests/actions/modifier_click.py:
+        * imported/w3c/webdriver/tests/actions/mouse.py:
+        * imported/w3c/webdriver/tests/actions/support/keys.py:
+        * imported/w3c/webdriver/tests/add_cookie/add.py:
+        * imported/w3c/webdriver/tests/back/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/back/back.py: Added.
+        * imported/w3c/webdriver/tests/back/conftest.py: Added.
+        * imported/w3c/webdriver/tests/close_window/close.py:
+        * imported/w3c/webdriver/tests/close_window/user_prompts.py:
+        * imported/w3c/webdriver/tests/conftest.py:
+        * imported/w3c/webdriver/tests/delete_all_cookies/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/delete_all_cookies/delete.py: Added.
+        * imported/w3c/webdriver/tests/delete_cookie/delete.py:
+        * imported/w3c/webdriver/tests/delete_cookie/user_prompts.py:
+        * imported/w3c/webdriver/tests/delete_session/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/delete_session/delete.py: Added.
+        * imported/w3c/webdriver/tests/dismiss_alert/dismiss.py:
+        * imported/w3c/webdriver/tests/element_clear/clear.py:
+        * imported/w3c/webdriver/tests/element_click/click.py: Added.
+        * imported/w3c/webdriver/tests/element_click/file_upload.py: Added.
+        * imported/w3c/webdriver/tests/element_click/interactability.py: Added.
+        * imported/w3c/webdriver/tests/element_click/navigate.py: Added.
+        * imported/w3c/webdriver/tests/element_click/scroll_into_view.py: Added.
+        * imported/w3c/webdriver/tests/element_click/support/close_window.html: Added.
+        * imported/w3c/webdriver/tests/element_click/support/input.html: Added.
+        * imported/w3c/webdriver/tests/element_send_keys/__init__.py:
+        * imported/w3c/webdriver/tests/element_send_keys/conftest.py: Added.
+        * imported/w3c/webdriver/tests/element_send_keys/events.py: Added.
+        * imported/w3c/webdriver/tests/element_send_keys/file_upload.py: Added.
+        * imported/w3c/webdriver/tests/element_send_keys/form_controls.py:
+        * imported/w3c/webdriver/tests/element_send_keys/send_keys.py: Added.
+        * imported/w3c/webdriver/tests/element_send_keys/user_prompts.py: Added.
+        * imported/w3c/webdriver/tests/execute_async_script/collections.py:
+        * imported/w3c/webdriver/tests/execute_async_script/execute_async.py: Added.
+        * imported/w3c/webdriver/tests/execute_async_script/user_prompts.py:
+        * imported/w3c/webdriver/tests/execute_script/cyclic.py:
+        * imported/w3c/webdriver/tests/execute_script/execute.py: Added.
+        * imported/w3c/webdriver/tests/execute_script/user_prompts.py:
+        * imported/w3c/webdriver/tests/find_element/find.py:
+        * imported/w3c/webdriver/tests/find_element_from_element/find.py:
+        * imported/w3c/webdriver/tests/find_elements/find.py:
+        * imported/w3c/webdriver/tests/find_elements_from_element/find.py:
+        * imported/w3c/webdriver/tests/forward/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/forward/conftest.py: Added.
+        * imported/w3c/webdriver/tests/forward/forward.py: Added.
+        * imported/w3c/webdriver/tests/forward/user_prompts.py: Added.
+        * imported/w3c/webdriver/tests/fullscreen_window/fullscreen.py:
+        * imported/w3c/webdriver/tests/fullscreen_window/user_prompts.py:
+        * imported/w3c/webdriver/tests/get_active_element/get.py:
+        * imported/w3c/webdriver/tests/get_alert_text/get.py:
+        * imported/w3c/webdriver/tests/get_current_url/get.py:
+        * imported/w3c/webdriver/tests/get_current_url/user_prompts.py:
+        * imported/w3c/webdriver/tests/get_element_attribute/get.py:
+        * imported/w3c/webdriver/tests/get_element_property/get.py:
+        * imported/w3c/webdriver/tests/get_element_property/user_prompts.py:
+        * imported/w3c/webdriver/tests/get_element_tag_name/get.py:
+        * imported/w3c/webdriver/tests/get_element_tag_name/user_prompts.py:
+        * imported/w3c/webdriver/tests/get_element_text/get.py:
+        * imported/w3c/webdriver/tests/get_named_cookie/get.py:
+        * imported/w3c/webdriver/tests/get_timeouts/get.py:
+        * imported/w3c/webdriver/tests/get_title/get.py:
+        * imported/w3c/webdriver/tests/get_title/user_prompts.py:
+        * imported/w3c/webdriver/tests/get_window_rect/get.py:
+        * imported/w3c/webdriver/tests/get_window_rect/user_prompts.py:
+        * imported/w3c/webdriver/tests/interface.html:
+        * imported/w3c/webdriver/tests/is_element_selected/selected.py:
+        * imported/w3c/webdriver/tests/is_element_selected/user_prompts.py:
+        * imported/w3c/webdriver/tests/maximize_window/maximize.py:
+        * imported/w3c/webdriver/tests/maximize_window/user_prompts.py:
+        * imported/w3c/webdriver/tests/minimize_window/minimize.py:
+        * imported/w3c/webdriver/tests/minimize_window/user_prompts.py:
+        * imported/w3c/webdriver/tests/navigate_to/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/navigate_to/navigate.py: Added.
+        * imported/w3c/webdriver/tests/new_session/conftest.py:
+        * imported/w3c/webdriver/tests/new_session/create_alwaysMatch.py:
+        * imported/w3c/webdriver/tests/new_session/create_firstMatch.py:
+        * imported/w3c/webdriver/tests/new_session/default_values.py:
+        * imported/w3c/webdriver/tests/new_session/invalid_capabilities.py:
+        * imported/w3c/webdriver/tests/new_session/merge.py:
+        * imported/w3c/webdriver/tests/new_session/page_load_strategy.py: Added.
+        * imported/w3c/webdriver/tests/new_session/platform_name.py: Added.
+        * imported/w3c/webdriver/tests/new_session/response.py:
+        * imported/w3c/webdriver/tests/new_session/timeouts.py: Added.
+        * imported/w3c/webdriver/tests/page_source/source.py:
+        * imported/w3c/webdriver/tests/refresh/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/refresh/refresh.py: Added.
+        * imported/w3c/webdriver/tests/refresh/user_prompts.py: Added.
+        * imported/w3c/webdriver/tests/send_alert_text/send.py:
+        * imported/w3c/webdriver/tests/set_timeouts/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/set_timeouts/set.py: Added.
+        * imported/w3c/webdriver/tests/set_window_rect/resizing_and_positioning.py: Removed.
+        * imported/w3c/webdriver/tests/set_window_rect/set.py:
+        * imported/w3c/webdriver/tests/set_window_rect/user_prompts.py:
+        * imported/w3c/webdriver/tests/status/status.py:
+        * imported/w3c/webdriver/tests/support/__init__.py:
+        * imported/w3c/webdriver/tests/support/asserts.py:
+        * imported/w3c/webdriver/tests/support/fixtures.py:
+        * imported/w3c/webdriver/tests/switch_to_frame/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/switch_to_frame/switch.py: Added.
+        * imported/w3c/webdriver/tests/switch_to_parent_frame/switch.py:
+        * imported/w3c/webdriver/tests/switch_to_window/__init__.py: Copied from WebDriverTests/imported/w3c/webdriver/tests/element_send_keys/__init__.py.
+        * imported/w3c/webdriver/tests/switch_to_window/switch.py: Added.
+
+2018-08-13  Carlos Garcia Campos  <cgarcia@igalia.com>
+
         WebDriver: several element_send_keys tests are failing since added
         https://bugs.webkit.org/show_bug.cgi?id=181644
 
index e9b4d04..ae99b94 100644
@@ -1,6 +1,6 @@
 {
     "repository": "https://github.com/w3c/web-platform-tests.git",
-    "revision": "389b958c00a4d6b897bfce284c0c88bf451fb6b8",
+    "revision": "af380aa415d2c1d8de383d3683b8ef55cb77e3c8",
     "paths_to_import": [
         "tools/webdriver",
         "tools/wptrunner",
index 015b31b..72de3b2 100644
@@ -9,7 +9,7 @@ implementation compliance to the specification in mind,
 so that different remote end drivers
 can determine whether they meet the recognised standard.
 The client is used for the WebDriver specification tests
-in the [Web Platform Tests](https://github.com/w3c/web-platform-tests).
+in the [Web Platform Tests](https://github.com/web-platform-tests/wpt).
 
 ## Installation
 
@@ -31,7 +31,7 @@ which is useful if you want to contribute patches back:
     >>> 
 
 If you are writing WebDriver specification tests for
-[WPT](https://github.com/w3c/web-platform-tests),
+[WPT](https://github.com/web-platform-tests/wpt),
 there is no need to install the client manually
 as it is included in the `tools/webdriver` directory.
 
index 30243f7..217bfc6 100644
@@ -1,4 +1,13 @@
-from client import Cookies, Element, Find, Session, Timeouts, Window
+# flake8: noqa
+
+from client import (
+    Cookies,
+    Element,
+    Find,
+    Frame,
+    Session,
+    Timeouts,
+    Window)
 from error import (
     ElementNotSelectableException,
     ElementNotVisibleException,
index a8abda0..a8c9a06 100644
@@ -16,7 +16,6 @@ def command(func):
 
         if session.session_id is None:
             session.start()
-        assert session.session_id is not None
 
         return func(self, *args, **kwargs)
 
@@ -39,7 +38,7 @@ class Timeouts(object):
 
     def _set(self, key, secs):
         body = {key: secs * 1000}
-        timeouts = self.session.send_session_command("POST", "timeouts", body)
+        self.session.send_session_command("POST", "timeouts", body)
         return None
 
     @property
@@ -234,6 +233,8 @@ class Actions(object):
 
 
 class Window(object):
+    identifier = "window-fcc6-11e5-b4f8-330a88ab9d7f"
+
     def __init__(self, session):
         self.session = session
 
@@ -284,6 +285,23 @@ class Window(object):
     def fullscreen(self):
         return self.session.send_session_command("POST", "window/fullscreen")
 
+    @classmethod
+    def from_json(cls, json, session):
+        uuid = json[Window.identifier]
+        return cls(uuid, session)
+
+
+class Frame(object):
+    identifier = "frame-075b-4da1-b6ba-e579c2d3230a"
+
+    def __init__(self, session):
+        self.session = session
+
+    @classmethod
+    def from_json(cls, json, session):
+        uuid = json[Frame.identifier]
+        return cls(uuid, session)
+
 
 class Find(object):
     def __init__(self, session):
@@ -352,7 +370,8 @@ class Session(object):
                  extension=None):
         self.transport = transport.HTTPWireProtocol(
             host, port, url_prefix, timeout=timeout)
-        self.capabilities = capabilities
+        self.requested_capabilities = capabilities
+        self.capabilities = None
         self.session_id = None
         self.timeouts = None
         self.window = None
@@ -390,8 +409,8 @@ class Session(object):
 
         body = {}
 
-        if self.capabilities is not None:
-            body["capabilities"] = self.capabilities
+        if self.requested_capabilities is not None:
+            body["capabilities"] = self.requested_capabilities
 
         value = self.send_command("POST", "session", body=body)
         self.session_id = value["sessionId"]
@@ -403,13 +422,16 @@ class Session(object):
         return value
 
     def end(self):
+        """Tries to close the active session."""
         if self.session_id is None:
             return
 
-        url = "session/%s" % self.session_id
-        self.send_command("DELETE", url)
-
-        self.session_id = None
+        try:
+            self.send_command("DELETE", "session/%s" % self.session_id)
+        except error.SessionNotCreatedException:
+            pass
+        finally:
+            self.session_id = None
 
     def send_command(self, method, url, body=None):
         """
@@ -435,7 +457,13 @@ class Session(object):
             session=self)
 
         if response.status != 200:
-            raise error.from_response(response)
+            err = error.from_response(response)
+
+            if isinstance(err, error.SessionNotCreatedException):
+                # The driver could have already been deleted the session.
+                self.session_id = None
+
+            raise err
 
         if "value" in response.body:
             value = response.body["value"]
@@ -639,7 +667,8 @@ class Element(object):
         self.id = id
         self.session = session
 
-        assert id not in self.session._element_cache
+        if id in self.session._element_cache:
+            raise ValueError("Element already in cache: %s" % id)
         self.session._element_cache[self.id] = self
 
     def __repr__(self):
@@ -651,7 +680,6 @@ class Element(object):
 
     @classmethod
     def from_json(cls, json, session):
-        assert Element.identifier in json
         uuid = json[Element.identifier]
         if uuid in session._element_cache:
             return session._element_cache[uuid]
@@ -677,7 +705,7 @@ class Element(object):
 
     @command
     def clear(self):
-        self.send_element_command("POST", self.url("clear"), {})
+        self.send_element_command("POST", "clear", {})
 
     @command
     def send_keys(self, text):
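
Taken together, the client.py changes make session teardown idempotent and keep the caller's requested capabilities separate from the capabilities the remote end actually returned. A rough usage sketch; the host, port and capabilities values are placeholders, not taken from the patch:

    import webdriver

    session = webdriver.Session("127.0.0.1", 4444,
                                capabilities={"alwaysMatch": {}})

    session.start()  # sets session.session_id and session.capabilities;
                     # session.requested_capabilities keeps what was asked for
    session.end()    # DELETE session/<id>; session_id is cleared even if the
                     # remote end reports "session not created"
    session.end()    # now returns immediately, since session_id is None
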
index ecfe891..b2337ff 100644
@@ -6,7 +6,7 @@ class WebDriverException(Exception):
     http_status = None
     status_code = None
 
-    def __init__(self, message, stacktrace=None):
+    def __init__(self, message=None, stacktrace=None):
         super(WebDriverException, self)
         self.message = message
         self.stacktrace = stacktrace
@@ -15,12 +15,15 @@ class WebDriverException(Exception):
         return "<%s http_status=%s>" % (self.__class__.__name__, self.http_status)
 
     def __str__(self):
-        message = "%s (%s): %s\n" % (self.status_code, self.http_status, self.message)
+        message = "%s (%s)" % (self.status_code, self.http_status)
+
+        if self.message is not None:
+            message += ": %s" % self.message
+        message += "\n"
+
         if self.stacktrace:
-            message += ("\n"
-            "Remote-end stacktrace:\n"
-            "\n"
-            "%s" % self.stacktrace)
+            message += ("\nRemote-end stacktrace:\n\n%s" % self.stacktrace)
+
         return message
 
 
@@ -89,6 +92,11 @@ class NoSuchAlertException(WebDriverException):
     status_code = "no such alert"
 
 
+class NoSuchCookieException(WebDriverException):
+    http_status = 404
+    status_code = "no such cookie"
+
+
 class NoSuchElementException(WebDriverException):
     http_status = 404
     status_code = "no such element"
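
With the message argument now optional, the string form of an exception degrades gracefully. Roughly, based on the new __str__ above:

    from webdriver.error import NoSuchCookieException

    # prints "no such cookie (404)" followed by a newline
    print(str(NoSuchCookieException()))

    # prints "no such cookie (404): no cookie named 'foo'" followed by a newline
    print(str(NoSuchCookieException("no cookie named 'foo'")))
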
index ea0c793..18a3d52 100644
@@ -16,6 +16,10 @@ class Encoder(json.JSONEncoder):
             return [self.default(x) for x in obj]
         elif isinstance(obj, webdriver.Element):
             return {webdriver.Element.identifier: obj.id}
+        elif isinstance(obj, webdriver.Frame):
+            return {webdriver.Frame.identifier: obj.id}
+        elif isinstance(obj, webdriver.Window):
+            return {webdriver.Frame.identifier: obj.id}
         return super(Encoder, self).default(obj)
 
 
@@ -30,6 +34,10 @@ class Decoder(json.JSONDecoder):
             return [self.object_hook(x) for x in payload]
         elif isinstance(payload, dict) and webdriver.Element.identifier in payload:
             return webdriver.Element.from_json(payload, self.session)
+        elif isinstance(payload, dict) and webdriver.Frame.identifier in payload:
+            return webdriver.Frame.from_json(payload, self.session)
+        elif isinstance(payload, dict) and webdriver.Window.identifier in payload:
+            return webdriver.Window.from_json(payload, self.session)
         elif isinstance(payload, dict):
             return {k: self.object_hook(v) for k, v in payload.iteritems()}
         return payload
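
Put differently, the Decoder now resolves any single-key JSON object whose key is one of the spec reference identifiers back to the matching client class. The element identifier below is the W3C web element identifier rather than something quoted from this diff, and the handle values are invented for illustration:

    # Reference shapes recognised by Decoder.object_hook:
    {"element-6066-11e4-a52e-4f735466cecf": "<handle>"}  # -> webdriver.Element
    {"frame-075b-4da1-b6ba-e579c2d3230a": "<handle>"}    # -> webdriver.Frame
    {"window-fcc6-11e5-b4f8-330a88ab9d7f": "<handle>"}   # -> webdriver.Window
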
index 960cb37..619b3fa 100644
@@ -50,8 +50,9 @@ class HTTPWireProtocol(object):
     Transports messages (commands and responses) over the WebDriver
     wire protocol.
 
-    Complex objects, such as ``webdriver.Element``, are by default
-    not marshaled to enable use of `session.transport.send` in WPT tests::
+    Complex objects, such as ``webdriver.Element``, ``webdriver.Frame``,
+    and ``webdriver.Window`` are by default not marshaled to enable
+    use of `session.transport.send` in WPT tests::
 
         session = webdriver.Session("127.0.0.1", 4444)
         response = transport.send("GET", "element/active", None)
@@ -100,8 +101,9 @@ class HTTPWireProtocol(object):
 
         The request `body` must be JSON serialisable unless a
         custom `encoder` has been provided.  This means complex
-        objects such as ``webdriver.Element`` are not automatically
-        made into JSON.  This behaviour is, however, provided by
+        objects such as ``webdriver.Element``, ``webdriver.Frame``,
+        and `webdriver.Window`` are not automatically made
+        into JSON.  This behaviour is, however, provided by
         ``webdriver.protocol.Encoder``, should you want it.
 
         Similarly, the response body is returned au natural
@@ -144,6 +146,7 @@ class HTTPWireProtocol(object):
 
         if headers is None:
             headers = {}
+        headers.update({'Connection': 'keep-alive'})
 
         url = self.url(uri)
 
index 8917922..76e496d 100644
@@ -233,4 +233,4 @@ The web-platform-test harness knows about several keys:
 `refurl`
   The reference url for reftests.
 
-.. _`web-platform-tests testsuite`: https://github.com/w3c/web-platform-tests
+.. _`web-platform-tests testsuite`: https://github.com/web-platform-tests/wpt
index b58f313..c2d4fd5 100644
@@ -12,8 +12,8 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
-import os
+#import sys
+#import os
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
index 258cca6..5997f42 100644
@@ -32,7 +32,7 @@ a copy of the web-platform-tests repository. This can be located
 anywhere on the filesystem, but the easiest option is to put it
 under the same parent directory as the wptrunner checkout::
 
-  git clone https://github.com/w3c/web-platform-tests.git
+  git clone https://github.com/web-platform-tests/wpt.git
 
 It is also necessary to generate a web-platform-tests ``MANIFEST.json``
 file. It's recommended to also put that under the same parent directory as
@@ -220,7 +220,7 @@ url to pull from; ``branch`` the branch to sync against and
 use when checking out the tests e.g.::
 
   [web-platform-tests]
-  remote_url = https://github.com/w3c/web-platform-tests.git
+  remote_url = https://github.com/web-platform-tests/wpt.git
   branch = master
   sync_path = sync
 
index 6da3a33..fcb5922 100644
@@ -1,5 +1,5 @@
 html5lib == 1.0.1
 mozinfo == 0.10
-mozlog == 3.7
+mozlog==3.8
 mozdebug == 0.1
 urllib3[secure] == 1.22
index 0eb26e7..f1e6a85 100644
@@ -4,5 +4,6 @@ mozprocess == 0.26
 mozcrash == 1.0
 mozrunner == 7.0.0
 mozleak == 0.1
-mozinstall == 1.15
-mozdownload == 1.23
+mozinstall==1.16.0
+mozdownload==1.24
+
index f63e2c0..fa7985e 100644
@@ -33,30 +33,4 @@ deps =
      pep8-naming==0.4.1
 
 commands =
-     flake8
-
-[flake8]
-# flake8 config should be kept in sync across tools/tox.ini, tools/wpt/tox.ini, and tools/wptrunner/tox.ini
-select = E,W,F,N
-# E128: continuation line under-indented for visual indent
-# E129: visually indented line with same indent as next logical line
-# E221: multiple spaces before operator
-# E226: missing whitespace around arithmetic operator
-# E231: missing whitespace after ‘,’, ‘;’, or ‘:’
-# E251: unexpected spaces around keyword / parameter equals
-# E265: block comment should start with ‘# ‘
-# E302: expected 2 blank lines, found 0
-# E303: too many blank lines (3)
-# E305: expected 2 blank lines after end of function or class
-# E402: module level import not at top of file
-# E731: do not assign a lambda expression, use a def
-# E901: SyntaxError or IndentationError
-# W601: .has_key() is deprecated, use ‘in’
-# F401: module imported but unused
-# F403: ‘from module import *’ used; unable to detect undefined names
-# F405: name may be undefined, or defined from star imports: module
-# F841: local variable name is assigned to but never used
-# N801: class names should use CapWords convention
-# N802: function name should be lowercase
-ignore = E128,E129,E221,E226,E231,E251,E265,E302,E303,E305,E402,E731,E901,W601,F401,F403,F405,F841,N801,N802
-max-line-length = 141
+     flake8 --append-config=../flake8.ini
index 34d25f8..19462bc 100644 (file)
@@ -1,7 +1,7 @@
 [products]
 
 [web-platform-tests]
-remote_url = https://github.com/w3c/web-platform-tests.git
+remote_url = https://github.com/web-platform-tests/wpt.git
 branch = master
 sync_path = %(pwd)s/sync
 
index 6f0c49e..d8682e1 100644 (file)
@@ -6,7 +6,7 @@ a dictionary with the fields
 "browser": String indicating the Browser implementation used to launch that
            product.
 "executor": Dictionary with keys as supported test types and values as the name
-            of the Executor implemantation that will be used to run that test
+            of the Executor implementation that will be used to run that test
             type.
 "browser_kwargs": String naming function that takes product, binary,
                   prefs_root and the wptrunner.run_tests kwargs dict as arguments
@@ -25,6 +25,7 @@ module global scope.
 product_list = ["chrome",
                 "chrome_android",
                 "edge",
+                "fennec",
                 "firefox",
                 "ie",
                 "safari",
index e4c9c30..dc03ef7 100644 (file)
@@ -3,7 +3,7 @@ import platform
 import socket
 from abc import ABCMeta, abstractmethod
 
-from ..wptcommandline import require_arg
+from ..wptcommandline import require_arg  # noqa: F401
 
 here = os.path.split(__file__)[0]
 
@@ -87,7 +87,7 @@ class Browser(object):
         return {}
 
     @abstractmethod
-    def start(self, **kwargs):
+    def start(self, group_metadata, **kwargs):
         """Launch the browser object and get it into a state where is is ready to run tests"""
         pass
 
index e50c592..8b0ba45 100644 (file)
@@ -1,9 +1,9 @@
 from .base import Browser, ExecutorBrowser, require_arg
 from ..webdriver_server import ChromeDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
-from ..executors.executorchrome import ChromeDriverWdspecExecutor
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
+                                          SeleniumRefTestExecutor)  # noqa: F401
+from ..executors.executorchrome import ChromeDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "chrome",
@@ -22,7 +22,7 @@ def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"binary": kwargs["binary"],
             "webdriver_binary": kwargs["webdriver_binary"],
             "webdriver_args": kwargs.get("webdriver_args")}
@@ -33,10 +33,11 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
     from selenium.webdriver import DesiredCapabilities
 
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data,
+                                           **kwargs)
     executor_kwargs["close_after_done"] = True
     capabilities = dict(DesiredCapabilities.CHROME.items())
-    capabilities.setdefault("chromeOptions", {})["prefs"] = {
+    capabilities.setdefault("goog:chromeOptions", {})["prefs"] = {
         "profile": {
             "default_content_setting_values": {
                 "popups": 1
@@ -45,12 +46,12 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
     }
     for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
         if kwargs[kwarg] is not None:
-            capabilities["chromeOptions"][capability] = kwargs[kwarg]
+            capabilities["goog:chromeOptions"][capability] = kwargs[kwarg]
     if test_type == "testharness":
-        capabilities["chromeOptions"]["useAutomationExtension"] = False
-        capabilities["chromeOptions"]["excludeSwitches"] = ["enable-automation"]
+        capabilities["goog:chromeOptions"]["useAutomationExtension"] = False
+        capabilities["goog:chromeOptions"]["excludeSwitches"] = ["enable-automation"]
     if test_type == "wdspec":
-        capabilities["chromeOptions"]["w3c"] = True
+        capabilities["goog:chromeOptions"]["w3c"] = True
     executor_kwargs["capabilities"] = capabilities
     return executor_kwargs
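
The switch to goog:chromeOptions follows the W3C WebDriver rule that non-standard ("extension") capabilities must carry a vendor prefix, which is the spelling ChromeDriver expects when speaking the W3C dialect. Roughly what the assembled payload looks like for a testharness run (binary path and extra arguments are examples, not harness defaults):

    capabilities = {
        "browserName": "chrome",
        "goog:chromeOptions": {
            "binary": "/usr/bin/google-chrome",   # example path
            "args": ["--enable-experimental-web-platform-features"],  # example flag
            "prefs": {"profile": {"default_content_setting_values": {"popups": 1}}},
            "useAutomationExtension": False,
            "excludeSwitches": ["enable-automation"],
        },
    }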
 
index 985b1fe..c96cf56 100644 (file)
@@ -1,12 +1,11 @@
 import subprocess
 
-from ..config import *
 from .base import Browser, ExecutorBrowser, require_arg
 from ..webdriver_server import ChromeDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
-from ..executors.executorchrome import ChromeDriverWdspecExecutor
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
+                                          SeleniumRefTestExecutor)  # noqa: F401
+from ..executors.executorchrome import ChromeDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "chrome_android",
@@ -27,7 +26,7 @@ def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"binary": kwargs["binary"],
             "webdriver_binary": kwargs["webdriver_binary"],
             "webdriver_args": kwargs.get("webdriver_args")}
@@ -43,8 +42,8 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
         server_config['ports']['ws'] + server_config['ports']['wss']
     ))
 
-    executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+    executor_kwargs = base_executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                                           **kwargs)
     executor_kwargs["close_after_done"] = True
     capabilities = dict(DesiredCapabilities.CHROME.items())
     capabilities["chromeOptions"] = {}
index 184ae32..ad2bb51 100644 (file)
@@ -1,9 +1,9 @@
 from .base import Browser, ExecutorBrowser, require_arg
 from ..webdriver_server import EdgeDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
-from ..executors.executoredge import EdgeDriverWdspecExecutor
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
+                                          SeleniumRefTestExecutor)  # noqa: F401
+from ..executors.executoredge import EdgeDriverWdspecExecutor  # noqa: F401
 
 __wptrunner__ = {"product": "edge",
                  "check_args": "check_args",
@@ -16,22 +16,32 @@ __wptrunner__ = {"product": "edge",
                  "env_extras": "env_extras",
                  "env_options": "env_options"}
 
+def get_timeout_multiplier(test_type, run_info_data, **kwargs):
+    if kwargs["timeout_multiplier"] is not None:
+        return kwargs["timeout_multiplier"]
+    if test_type == "wdspec":
+        return 10
+    return 1
 
 def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"webdriver_binary": kwargs["webdriver_binary"],
-            "webdriver_args": kwargs.get("webdriver_args")}
+            "webdriver_args": kwargs.get("webdriver_args"),
+            "timeout_multiplier": get_timeout_multiplier(test_type,
+                                                         run_info_data,
+                                                         **kwargs)}
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
-    from selenium.webdriver import DesiredCapabilities
-
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
-    executor_kwargs["capabilities"] = dict(DesiredCapabilities.EDGE.items())
+    executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
+                                                                   run_info_data,
+                                                                   **kwargs)
+    executor_kwargs["capabilities"] = {}
     return executor_kwargs
 
 def env_extras(**kwargs):
@@ -42,14 +52,18 @@ def env_options():
 
 class EdgeBrowser(Browser):
     used_ports = set()
+    init_timeout = 60
 
-    def __init__(self, logger, webdriver_binary, webdriver_args=None):
+    def __init__(self, logger, webdriver_binary, timeout_multiplier=None, webdriver_args=None):
         Browser.__init__(self, logger)
         self.server = EdgeDriverServer(self.logger,
                                        binary=webdriver_binary,
                                        args=webdriver_args)
         self.webdriver_host = "localhost"
         self.webdriver_port = self.server.port
+        if timeout_multiplier:
+            self.init_timeout = self.init_timeout * timeout_multiplier
+
 
     def start(self, **kwargs):
         print self.server.url
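
The new multiplier feeds both the executor kwargs and EdgeBrowser, where it scales the base init_timeout of 60 seconds shown above; an explicitly supplied timeout_multiplier always wins. A small worked sketch of that arithmetic:

    def effective_init_timeout(base_timeout, test_type, timeout_multiplier=None):
        # Mirrors get_timeout_multiplier above: explicit value wins, wdspec gets 10x.
        if timeout_multiplier is not None:
            multiplier = timeout_multiplier
        elif test_type == "wdspec":
            multiplier = 10
        else:
            multiplier = 1
        return base_timeout * multiplier

    assert effective_init_timeout(60, "wdspec") == 600       # ten minutes for wdspec
    assert effective_init_timeout(60, "testharness") == 60   # unchanged otherwise
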
diff --git a/WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/fennec.py b/WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/browsers/fennec.py
new file mode 100644 (file)
index 0000000..db271ac
--- /dev/null
@@ -0,0 +1,254 @@
+import os
+import signal
+import sys
+import tempfile
+import traceback
+
+import moznetwork
+from mozprocess import ProcessHandler
+from mozprofile import FirefoxProfile
+from mozrunner import FennecEmulatorRunner
+
+from serve.serve import make_hosts_file
+
+from .base import (get_free_port,
+                   cmd_arg,
+                   browser_command)
+from ..executors.executormarionette import MarionetteTestharnessExecutor  # noqa: F401
+from .firefox import (get_timeout_multiplier, update_properties, executor_kwargs, FirefoxBrowser)  # noqa: F401
+
+
+__wptrunner__ = {"product": "fennec",
+                 "check_args": "check_args",
+                 "browser": "FennecBrowser",
+                 "executor": {"testharness": "MarionetteTestharnessExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_extras": "env_extras",
+                 "env_options": "env_options",
+                 "run_info_extras": "run_info_extras",
+                 "update_properties": "update_properties"}
+
+class FennecProfile(FirefoxProfile):
+    # WPT-specific prefs are set in FennecBrowser.start()
+    FirefoxProfile.preferences.update({
+        # Make sure Shield doesn't hit the network.
+        "app.normandy.api_url": "",
+        # Increase the APZ content response timeout in tests to 1 minute.
+        "apz.content_response_timeout": 60000,
+        # Enable output of dump()
+        "browser.dom.window.dump.enabled": True,
+        # Disable safebrowsing components
+        "browser.safebrowsing.blockedURIs.enabled": False,
+        "browser.safebrowsing.downloads.enabled": False,
+        "browser.safebrowsing.passwords.enabled": False,
+        "browser.safebrowsing.malware.enabled": False,
+        "browser.safebrowsing.phishing.enabled": False,
+        # Do not restore the last open set of tabs if the browser has crashed
+        "browser.sessionstore.resume_from_crash": False,
+        # Disable Android snippets
+        "browser.snippets.enabled": False,
+        "browser.snippets.syncPromo.enabled": False,
+        "browser.snippets.firstrunHomepage.enabled": False,
+        # Do not allow background tabs to be zombified, otherwise for tests that
+        # open additional tabs, the test harness tab itself might get unloaded
+        "browser.tabs.disableBackgroundZombification": True,
+        # Disable e10s by default
+        "browser.tabs.remote.autostart": False,
+        # Don't warn when exiting the browser
+        "browser.warnOnQuit": False,
+        # Don't send Firefox health reports to the production server
+        "datareporting.healthreport.about.reportUrl": "http://%(server)s/dummy/abouthealthreport/",
+        # Automatically unload beforeunload alerts
+        "dom.disable_beforeunload": True,
+        # Disable the ProcessHangMonitor
+        "dom.ipc.reportProcessHangs": False,
+        # No slow script dialogs
+        "dom.max_chrome_script_run_time": 0,
+        "dom.max_script_run_time": 0,
+        # Make sure opening about:addons won"t hit the network
+        "extensions.webservice.discoverURL": "http://%(server)s/dummy/discoveryURL",
+        # No hang monitor
+        "hangmonitor.timeout": 0,
+
+        "javascript.options.showInConsole": True,
+        # Ensure blocklist updates don't hit the network
+        "services.settings.server": "http://%(server)s/dummy/blocklist/",
+        # Disable password capture, so that tests that include forms aren"t
+        # influenced by the presence of the persistent doorhanger notification
+        "signon.rememberSignons": False,
+    })
+
+
+def check_args(**kwargs):
+    pass
+
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
+    return {"package_name": kwargs["package_name"],
+            "device_serial": kwargs["device_serial"],
+            "prefs_root": kwargs["prefs_root"],
+            "extra_prefs": kwargs["extra_prefs"],
+            "test_type": test_type,
+            "debug_info": kwargs["debug_info"],
+            "symbols_path": kwargs["symbols_path"],
+            "stackwalk_binary": kwargs["stackwalk_binary"],
+            "certutil_binary": kwargs["certutil_binary"],
+            "ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
+            "stackfix_dir": kwargs["stackfix_dir"],
+            "binary_args": kwargs["binary_args"],
+            "timeout_multiplier": get_timeout_multiplier(test_type,
+                                                         run_info_data,
+                                                         **kwargs),
+            "leak_check": kwargs["leak_check"],
+            "stylo_threads": kwargs["stylo_threads"],
+            "chaos_mode_flags": kwargs["chaos_mode_flags"],
+            "config": config}
+
+
+def env_extras(**kwargs):
+    return []
+
+
+def run_info_extras(**kwargs):
+    return {"e10s": False,
+            "headless": False}
+
+
+def env_options():
+    # The server host is set to public localhost IP so that resources can be accessed
+    # from Android emulator
+    return {"server_host": moznetwork.get_ip(),
+            "bind_address": False,
+            "supports_debugger": True}
+
+
+def write_hosts_file(config, device):
+    new_hosts = make_hosts_file(config, moznetwork.get_ip())
+    current_hosts = device.get_file("/etc/hosts")
+    if new_hosts == current_hosts:
+        return
+    hosts_fd, hosts_path = tempfile.mkstemp()
+    try:
+        with os.fdopen(hosts_fd, "w") as f:
+            f.write(new_hosts)
+        device.remount()
+        device.push(hosts_path, "/etc/hosts")
+    finally:
+        os.remove(hosts_path)
+
+
+class FennecBrowser(FirefoxBrowser):
+    used_ports = set()
+    init_timeout = 300
+    shutdown_timeout = 60
+
+    def __init__(self, logger, prefs_root, test_type, package_name=None,
+                 device_serial="emulator-5444", **kwargs):
+        FirefoxBrowser.__init__(self, logger, None, prefs_root, test_type, **kwargs)
+        self._package_name = package_name
+        self.device_serial = device_serial
+
+    @property
+    def package_name(self):
+        """
+        Name of app to run on emulator.
+        """
+        if self._package_name is None:
+            self._package_name = "org.mozilla.fennec"
+            user = os.getenv("USER")
+            if user:
+                self._package_name += "_" + user
+        return self._package_name
+
+    def start(self, **kwargs):
+        if self.marionette_port is None:
+            self.marionette_port = get_free_port(2828, exclude=self.used_ports)
+            self.used_ports.add(self.marionette_port)
+
+        env = {}
+        env["MOZ_CRASHREPORTER"] = "1"
+        env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
+        env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+        env["STYLO_THREADS"] = str(self.stylo_threads)
+        if self.chaos_mode_flags is not None:
+            env["MOZ_CHAOSMODE"] = str(self.chaos_mode_flags)
+
+        preferences = self.load_prefs()
+
+        self.profile = FennecProfile(preferences=preferences)
+        self.profile.set_preferences({"marionette.port": self.marionette_port,
+                                      "dom.disable_open_during_load": False,
+                                      "places.history.enabled": False,
+                                      "dom.send_after_paint_to_content": True,
+                                      "network.preload": True})
+
+        if self.leak_check and kwargs.get("check_leaks", True):
+            self.leak_report_file = os.path.join(self.profile.profile, "runtests_leaks.log")
+            if os.path.exists(self.leak_report_file):
+                os.remove(self.leak_report_file)
+            env["XPCOM_MEM_BLOAT_LOG"] = self.leak_report_file
+        else:
+            self.leak_report_file = None
+
+        if self.ca_certificate_path is not None:
+            self.setup_ssl()
+
+        debug_args, cmd = browser_command(self.package_name,
+                                          self.binary_args if self.binary_args else [] +
+                                          [cmd_arg("marionette"), "about:blank"],
+                                          self.debug_info)
+
+        self.runner = FennecEmulatorRunner(app=self.package_name,
+                                           profile=self.profile,
+                                           cmdargs=cmd[1:],
+                                           env=env,
+                                           symbols_path=self.symbols_path,
+                                           serial=self.device_serial,
+                                           # TODO - choose appropriate log dir
+                                           logdir=os.getcwd(),
+                                           process_class=ProcessHandler,
+                                           process_args={"processOutputLine": [self.on_output]})
+
+        self.logger.debug("Starting Fennec")
+        # connect to a running emulator
+        self.runner.device.connect()
+
+        write_hosts_file(self.config, self.runner.device.device)
+
+        self.runner.start(debug_args=debug_args, interactive=self.debug_info and self.debug_info.interactive)
+
+        # gecko_log comes from logcat when running with device/emulator
+        logcat_args = {
+            "filterspec": "Gecko",
+            "serial": self.runner.device.app_ctx.device_serial
+        }
+        # TODO setting logcat_args["logfile"] yields an almost empty file
+        # even without filterspec
+        logcat_args["stream"] = sys.stdout
+        self.runner.device.start_logcat(**logcat_args)
+
+        self.runner.device.device.forward(
+            local="tcp:{}".format(self.marionette_port),
+            remote="tcp:{}".format(self.marionette_port))
+
+        self.logger.debug("Fennec Started")
+
+    def stop(self, force=False):
+        if self.runner is not None:
+            try:
+                if self.runner.device.connected:
+                    self.runner.device.device.remove_forwards(
+                        "tcp:{}".format(self.marionette_port))
+            except Exception:
+                traceback.print_exception(*sys.exc_info())
+            # We assume that stopping the runner prompts the
+            # browser to shut down. This allows the leak log to be written
+            for clean, stop_f in [(True, lambda: self.runner.wait(self.shutdown_timeout)),
+                                  (False, lambda: self.runner.stop(signal.SIGTERM)),
+                                  (False, lambda: self.runner.stop(signal.SIGKILL))]:
+                if not force or not clean:
+                    retcode = stop_f()
+                    if retcode is not None:
+                        self.logger.info("Browser exited with return code %s" % retcode)
+                        break
+        self.logger.debug("stopped")
index 68017ab..04f2ce8 100644 (file)
@@ -4,14 +4,13 @@ import platform
 import signal
 import subprocess
 import sys
-import tempfile
 
 import mozinfo
 import mozleak
 from mozprocess import ProcessHandler
 from mozprofile import FirefoxProfile, Preferences
 from mozrunner import FirefoxRunner
-from mozrunner.utils import get_stack_fixer_function
+from mozrunner.utils import test_environment, get_stack_fixer_function
 from mozcrash import mozcrash
 
 from .base import (get_free_port,
@@ -21,9 +20,9 @@ from .base import (get_free_port,
                    cmd_arg,
                    browser_command)
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executormarionette import (MarionetteTestharnessExecutor,
-                                            MarionetteRefTestExecutor,
-                                            MarionetteWdspecExecutor)
+from ..executors.executormarionette import (MarionetteTestharnessExecutor,  # noqa: F401
+                                            MarionetteRefTestExecutor,  # noqa: F401
+                                            MarionetteWdspecExecutor)  # noqa: F401
 
 
 here = os.path.join(os.path.split(__file__)[0])
@@ -55,6 +54,8 @@ def get_timeout_multiplier(test_type, run_info_data, **kwargs):
             return 4
         else:
             return 3
+    elif run_info_data["os"] == "android":
+        return 4
     return 1
 
 
@@ -62,7 +63,7 @@ def check_args(**kwargs):
     require_arg(kwargs, "binary")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"binary": kwargs["binary"],
             "prefs_root": kwargs["prefs_root"],
             "extra_prefs": kwargs["extra_prefs"],
@@ -71,7 +72,7 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
             "symbols_path": kwargs["symbols_path"],
             "stackwalk_binary": kwargs["stackwalk_binary"],
             "certutil_binary": kwargs["certutil_binary"],
-            "ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
+            "ca_certificate_path": config.ssl_config["ca_cert_path"],
             "e10s": kwargs["gecko_e10s"],
             "stackfix_dir": kwargs["stackfix_dir"],
             "binary_args": kwargs["binary_args"],
@@ -79,19 +80,22 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
                                                          run_info_data,
                                                          **kwargs),
             "leak_check": kwargs["leak_check"],
+            "asan": run_info_data.get("asan"),
             "stylo_threads": kwargs["stylo_threads"],
             "chaos_mode_flags": kwargs["chaos_mode_flags"],
-            "config": kwargs["config"]}
+            "config": config}
 
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data,
+                                           **kwargs)
     executor_kwargs["close_after_done"] = test_type != "reftest"
     executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                                    run_info_data,
                                                                    **kwargs)
+    executor_kwargs["e10s"] = run_info_data["e10s"]
     capabilities = {}
     if test_type == "reftest":
         executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
@@ -110,6 +114,8 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
         capabilities["acceptInsecureCerts"] = True
     if capabilities:
         executor_kwargs["capabilities"] = capabilities
+    executor_kwargs["debug"] = run_info_data["debug"]
+    executor_kwargs["ccov"] = run_info_data.get("ccov", False)
     return executor_kwargs
 
 
@@ -122,7 +128,7 @@ def env_options():
     # network.dns.localDomains preference set below) to resolve the test
     # domains to localhost without relying on the network stack.
     #
-    # https://github.com/w3c/web-platform-tests/pull/9480
+    # https://github.com/web-platform-tests/wpt/pull/9480
     return {"server_host": "127.0.0.1",
             "bind_address": False,
             "supports_debugger": True}
@@ -130,6 +136,8 @@ def env_options():
 
 def run_info_extras(**kwargs):
     return {"e10s": kwargs["gecko_e10s"],
+            "wasm": kwargs.get("wasm", True),
+            "verify": kwargs["verify"],
             "headless": "MOZ_HEADLESS" in os.environ}
 
 
@@ -140,14 +148,14 @@ def update_properties():
 
 class FirefoxBrowser(Browser):
     used_ports = set()
-    init_timeout = 60
-    shutdown_timeout = 60
+    init_timeout = 70
+    shutdown_timeout = 70
 
     def __init__(self, logger, binary, prefs_root, test_type, extra_prefs=None, debug_info=None,
                  symbols_path=None, stackwalk_binary=None, certutil_binary=None,
                  ca_certificate_path=None, e10s=False, stackfix_dir=None,
-                 binary_args=None, timeout_multiplier=None, leak_check=False, stylo_threads=1,
-                 chaos_mode_flags=None, config=None):
+                 binary_args=None, timeout_multiplier=None, leak_check=False, asan=False,
+                 stylo_threads=1, chaos_mode_flags=None, config=None):
         Browser.__init__(self, logger)
         self.binary = binary
         self.prefs_root = prefs_root
@@ -173,23 +181,38 @@ class FirefoxBrowser(Browser):
         if timeout_multiplier:
             self.init_timeout = self.init_timeout * timeout_multiplier
 
-        self.leak_report_file = None
+        self.asan = asan
+        self.lsan_allowed = None
         self.leak_check = leak_check
+        self.leak_report_file = None
+        self.lsan_handler = None
         self.stylo_threads = stylo_threads
         self.chaos_mode_flags = chaos_mode_flags
 
     def settings(self, test):
-        return {"check_leaks": self.leak_check and not test.leaks}
+        self.lsan_allowed = test.lsan_allowed
+        return {"check_leaks": self.leak_check and not test.leaks,
+                "lsan_allowed": test.lsan_allowed}
+
+    def start(self, group_metadata=None, **kwargs):
+        if group_metadata is None:
+            group_metadata = {}
 
-    def start(self, **kwargs):
         if self.marionette_port is None:
             self.marionette_port = get_free_port(2828, exclude=self.used_ports)
             self.used_ports.add(self.marionette_port)
 
-        env = os.environ.copy()
-        env["MOZ_CRASHREPORTER"] = "1"
-        env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
-        env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+        if self.asan:
+            print "Setting up LSAN"
+            self.lsan_handler = mozleak.LSANLeaks(self.logger,
+                                                  scope=group_metadata.get("scope", "/"),
+                                                  allowed=self.lsan_allowed)
+
+        env = test_environment(xrePath=os.path.dirname(self.binary),
+                               debugger=self.debug_info is not None,
+                               log=self.logger,
+                               lsanPath=self.prefs_root)
+
         env["STYLO_THREADS"] = str(self.stylo_threads)
         if self.chaos_mode_flags is not None:
             env["MOZ_CHAOSMODE"] = str(self.chaos_mode_flags)
@@ -210,8 +233,8 @@ class FirefoxBrowser(Browser):
         if self.test_type == "reftest":
             self.profile.set_preferences({"layout.interruptible-reflow.enabled": False})
 
-        if self.leak_check and kwargs.get("check_leaks", True):
-            self.leak_report_file = os.path.join(self.profile.profile, "runtests_leaks.log")
+        if self.leak_check:
+            self.leak_report_file = os.path.join(self.profile.profile, "runtests_leaks_%s.log" % os.getpid())
             if os.path.exists(self.leak_report_file):
                 os.remove(self.leak_report_file)
             env["XPCOM_MEM_BLOAT_LOG"] = self.leak_report_file
@@ -247,16 +270,21 @@ class FirefoxBrowser(Browser):
         prefs = Preferences()
 
         pref_paths = []
-        prefs_general = os.path.join(self.prefs_root, 'prefs_general.js')
-        if os.path.isfile(prefs_general):
-            # Old preference file used in Firefox 60 and earlier (remove when no longer supported)
-            pref_paths.append(prefs_general)
 
         profiles = os.path.join(self.prefs_root, 'profiles.json')
         if os.path.isfile(profiles):
             with open(profiles, 'r') as fh:
                 for name in json.load(fh)['web-platform-tests']:
                     pref_paths.append(os.path.join(self.prefs_root, name, 'user.js'))
+        else:
+            # Old preference files used before the creation of profiles.json (remove when no longer supported)
+            legacy_pref_paths = (
+                os.path.join(self.prefs_root, 'prefs_general.js'),   # Used in Firefox 60 and below
+                os.path.join(self.prefs_root, 'common', 'user.js'),  # Used in Firefox 61
+            )
+            for path in legacy_pref_paths:
+                if os.path.isfile(path):
+                    pref_paths.append(path)
 
         for path in pref_paths:
             if os.path.exists(path):
@@ -285,24 +313,26 @@ class FirefoxBrowser(Browser):
             except OSError:
                 # This can happen on Windows if the process is already dead
                 pass
+        self.process_leaks()
         self.logger.debug("stopped")
 
     def process_leaks(self):
         self.logger.debug("PROCESS LEAKS %s" % self.leak_report_file)
-        if self.leak_report_file is None:
-            return
-        mozleak.process_leak_log(
-            self.leak_report_file,
-            leak_thresholds={
-                "default": 0,
-                "tab": 10000,  # See dependencies of bug 1051230.
-                # GMP rarely gets a log, but when it does, it leaks a little.
-                "geckomediaplugin": 20000,
-            },
-            ignore_missing_leaks=["geckomediaplugin"],
-            log=self.logger,
-            stack_fixer=self.stack_fixer
-        )
+        if self.lsan_handler:
+            self.lsan_handler.process()
+        if self.leak_report_file is not None:
+            mozleak.process_leak_log(
+                self.leak_report_file,
+                leak_thresholds={
+                    "default": 0,
+                    "tab": 10000,  # See dependencies of bug 1051230.
+                    # GMP rarely gets a log, but when it does, it leaks a little.
+                    "geckomediaplugin": 20000,
+                },
+                ignore_missing_leaks=["geckomediaplugin"],
+                log=self.logger,
+                stack_fixer=self.stack_fixer
+            )
 
     def pid(self):
         if self.runner.process_handler is None:
@@ -321,18 +351,20 @@ class FirefoxBrowser(Browser):
             data = line.decode("utf8", "replace")
             if self.stack_fixer:
                 data = self.stack_fixer(data)
-            self.logger.process_output(self.pid(),
-                                      data,
-                                      command=" ".join(self.runner.command))
+            if self.lsan_handler:
+                data = self.lsan_handler.log(data)
+            if data is not None:
+                self.logger.process_output(self.pid(),
+                                           data,
+                                           command=" ".join(self.runner.command))
 
     def is_alive(self):
         if self.runner:
             return self.runner.is_running()
         return False
 
-    def cleanup(self):
-        self.stop()
-        self.process_leaks()
+    def cleanup(self, force=False):
+        self.stop(force)
 
     def executor_browser(self):
         assert self.marionette_port is not None
@@ -370,7 +402,7 @@ class FirefoxBrowser(Browser):
         # local copy of certutil
         # TODO: Maybe only set this if certutil won't launch?
         env = os.environ.copy()
-        certutil_dir = os.path.dirname(self.binary)
+        certutil_dir = os.path.dirname(self.binary or self.certutil_binary)
         if mozinfo.isMac:
             env_var = "DYLD_LIBRARY_PATH"
         elif mozinfo.isUnix:
index 97d96ec..a0730f8 100644 (file)
@@ -1,9 +1,9 @@
 from .base import Browser, ExecutorBrowser, require_arg
 from ..webdriver_server import InternetExplorerDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
-from ..executors.executorinternetexplorer import InternetExplorerDriverWdspecExecutor
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
+                                          SeleniumRefTestExecutor)  # noqa: F401
+from ..executors.executorinternetexplorer import InternetExplorerDriverWdspecExecutor  # noqa: F401
 
 __wptrunner__ = {"product": "ie",
                  "check_args": "check_args",
@@ -20,20 +20,18 @@ __wptrunner__ = {"product": "ie",
 def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"webdriver_binary": kwargs["webdriver_binary"],
             "webdriver_args": kwargs.get("webdriver_args")}
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
-    from selenium.webdriver import DesiredCapabilities
-
     options = {}
     options["requireWindowFocus"] = True
     capabilities = {}
     capabilities["se:ieOptions"] = options
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
     executor_kwargs["capabilities"] = capabilities
     return executor_kwargs
index bfacfa5..c5aeaaa 100644 (file)
@@ -1,9 +1,9 @@
 from .base import Browser, ExecutorBrowser, require_arg
 from ..webdriver_server import OperaDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
-from ..executors.executoropera import OperaDriverWdspecExecutor
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
+                                          SeleniumRefTestExecutor)  # noqa: F401
+from ..executors.executoropera import OperaDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "opera",
@@ -22,7 +22,7 @@ def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"binary": kwargs["binary"],
             "webdriver_binary": kwargs["webdriver_binary"],
             "webdriver_args": kwargs.get("webdriver_args")}
@@ -33,7 +33,7 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
     from selenium.webdriver import DesiredCapabilities
 
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
     capabilities = dict(DesiredCapabilities.OPERA.items())
     capabilities.setdefault("operaOptions", {})["prefs"] = {
index 3b99d22..670098e 100644 (file)
@@ -1,15 +1,17 @@
 from .base import Browser, ExecutorBrowser, require_arg
 from ..webdriver_server import SafariDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
+                                          SeleniumRefTestExecutor)  # noqa: F401
+from ..executors.executorsafari import SafariDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "safari",
                  "check_args": "check_args",
                  "browser": "SafariBrowser",
                  "executor": {"testharness": "SeleniumTestharnessExecutor",
-                              "reftest": "SeleniumRefTestExecutor"},
+                              "reftest": "SeleniumRefTestExecutor",
+                              "wdspec": "SafariDriverWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
@@ -20,19 +22,17 @@ def check_args(**kwargs):
     require_arg(kwargs, "webdriver_binary")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"webdriver_binary": kwargs["webdriver_binary"],
             "webdriver_args": kwargs.get("webdriver_args")}
 
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
-    from selenium.webdriver import DesiredCapabilities
-
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
-    executor_kwargs["capabilities"] = dict(DesiredCapabilities.SAFARI.items())
+    executor_kwargs["capabilities"] = {}
     if kwargs["binary"] is not None:
         raise ValueError("Safari doesn't support setting executable location")
 
index 709af41..9aa484a 100644 (file)
@@ -15,10 +15,13 @@ import requests
 
 from .base import Browser, ExecutorBrowser, require_arg
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
+                                          SeleniumRefTestExecutor)  # noqa: F401
 
 here = os.path.split(__file__)[0]
+# Number of seconds to wait between polling operations when detecting status of
+# Sauce Connect sub-process.
+sc_poll_period = 1
 
 
 __wptrunner__ = {"product": "sauce",
@@ -92,7 +95,7 @@ def check_args(**kwargs):
     require_arg(kwargs, "sauce_key")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     sauce_config = get_sauce_config(**kwargs)
 
     return {"sauce_config": sauce_config}
@@ -101,7 +104,7 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
 
     executor_kwargs["capabilities"] = get_capabilities(**kwargs)
 
@@ -170,31 +173,24 @@ class SauceConnect():
         ])
 
         # Timeout config vars
-        each_sleep_secs = 1
         max_wait = 30
-        kill_wait = 5
 
         tot_wait = 0
         while not os.path.exists('./sauce_is_ready') and self.sc_process.poll() is None:
             if tot_wait >= max_wait:
-                self.sc_process.terminate()
-                while self.sc_process.poll() is None:
-                    time.sleep(each_sleep_secs)
-                    tot_wait += each_sleep_secs
-                    if tot_wait >= (max_wait + kill_wait):
-                        self.sc_process.kill()
-                        break
+                self.quit()
+
                 raise SauceException("Sauce Connect Proxy was not ready after %d seconds" % tot_wait)
 
-            time.sleep(each_sleep_secs)
-            tot_wait += each_sleep_secs
+            time.sleep(sc_poll_period)
+            tot_wait += sc_poll_period
 
         if self.sc_process.returncode is not None:
             raise SauceException("Unable to start Sauce Connect Proxy. Process exited with code %s", self.sc_process.returncode)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.env_config = None
-        self.sc_process.terminate()
+        self.quit()
         if self.temp_dir and os.path.exists(self.temp_dir):
             try:
                 shutil.rmtree(self.temp_dir)
@@ -208,6 +204,23 @@ class SauceConnect():
         with open(os.path.join(here, 'sauce_setup', file_name), 'rb') as f:
             requests.post(url, data=f, auth=auth)
 
+    def quit(self):
+        """The Sauce Connect process may be managing an active "tunnel" to the
+        Sauce Labs service. Issue a request to the process to close any tunnels
+        and exit. If this does not occur within 5 seconds, force the process to
+        close."""
+        kill_wait = 5
+        tot_wait = 0
+        self.sc_process.terminate()
+
+        while self.sc_process.poll() is None:
+            time.sleep(sc_poll_period)
+            tot_wait += sc_poll_period
+
+            if tot_wait >= kill_wait:
+                self.sc_process.kill()
+                break
+
 
 class SauceException(Exception):
     pass
@@ -220,7 +233,7 @@ class SauceBrowser(Browser):
         Browser.__init__(self, logger)
         self.sauce_config = sauce_config
 
-    def start(self):
+    def start(self, **kwargs):
         pass
 
     def stop(self, force=False):
index 9d0878e..1a3e6fe 100644 (file)
@@ -5,5 +5,5 @@ reg add "HKCU\Software\Classes\Local Settings\Software\Microsoft\Windows\Current
 REM Download and install the Ahem font
 REM - https://wiki.saucelabs.com/display/DOCS/Downloading+Files+to+a+Sauce+Labs+Virtual+Machine+Prior+to+Testing
 REM - https://superuser.com/questions/201896/how-do-i-install-a-font-from-the-windows-command-prompt
-bitsadmin.exe /transfer "JobName" https://github.com/w3c/web-platform-tests/raw/master/fonts/Ahem.ttf "%WINDIR%\Fonts\Ahem.ttf"
+bitsadmin.exe /transfer "JobName" https://github.com/web-platform-tests/wpt/raw/master/fonts/Ahem.ttf "%WINDIR%\Fonts\Ahem.ttf"
 reg add "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts" /v "Ahem (TrueType)" /t REG_SZ /d Ahem.ttf /f
index 06c48bd..39390e6 100644 (file)
@@ -1,3 +1,3 @@
 #!/bin/bash
-curl https://raw.githubusercontent.com/w3c/web-platform-tests/master/fonts/Ahem.ttf > ~/Library/Fonts/Ahem.ttf
+curl https://raw.githubusercontent.com/web-platform-tests/wpt/master/fonts/Ahem.ttf > ~/Library/Fonts/Ahem.ttf
 defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaScriptCanOpenWindowsAutomatically -bool true
index dd54fb0..1a357a3 100644 (file)
@@ -2,7 +2,7 @@ import os
 
 from .base import NullBrowser, ExecutorBrowser, require_arg
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor
+from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor  # noqa: F401
 
 here = os.path.join(os.path.split(__file__)[0])
 
@@ -27,20 +27,20 @@ def check_args(**kwargs):
     require_arg(kwargs, "binary")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {
         "binary": kwargs["binary"],
         "debug_info": kwargs["debug_info"],
         "binary_args": kwargs["binary_args"],
         "user_stylesheets": kwargs.get("user_stylesheets"),
-        "ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
+        "ca_certificate_path": config.ssl_config["ca_cert_path"],
     }
 
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     rv = base_executor_kwargs(test_type, server_config,
-                              cache_manager, **kwargs)
+                              cache_manager, run_info_data, **kwargs)
     rv["pause_after_test"] = kwargs["pause_after_test"]
     if test_type == "wdspec":
         rv["capabilities"] = {}
index f2ee00a..aa17eba 100644 (file)
@@ -1,5 +1,4 @@
 import os
-import shutil
 import subprocess
 import tempfile
 
@@ -9,8 +8,8 @@ from serve.serve import make_hosts_file
 
 from .base import Browser, require_arg, get_free_port, browser_command, ExecutorBrowser
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor,
-                                             ServoWebDriverRefTestExecutor)
+from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor,  # noqa: F401
+                                             ServoWebDriverRefTestExecutor)  # noqa: F401
 
 here = os.path.join(os.path.split(__file__)[0])
 
@@ -34,17 +33,19 @@ def check_args(**kwargs):
     require_arg(kwargs, "binary")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {
         "binary": kwargs["binary"],
+        "binary_args": kwargs["binary_args"],
         "debug_info": kwargs["debug_info"],
+        "server_config": config.ssl_config,
         "user_stylesheets": kwargs.get("user_stylesheets"),
     }
 
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs):
     rv = base_executor_kwargs(test_type, server_config,
-                              cache_manager, **kwargs)
+                              cache_manager, run_info_data, **kwargs)
     return rv
 
 
@@ -71,16 +72,19 @@ def write_hosts_file(config):
 
 class ServoWebDriverBrowser(Browser):
     used_ports = set()
+    init_timeout = 300  # Large timeout for cases where we're booting an Android emulator
 
     def __init__(self, logger, binary, debug_info=None, webdriver_host="127.0.0.1",
-                 user_stylesheets=None):
+                 server_config=None, binary_args=None, user_stylesheets=None):
         Browser.__init__(self, logger)
         self.binary = binary
+        self.binary_args = binary_args or []
         self.webdriver_host = webdriver_host
         self.webdriver_port = None
         self.proc = None
         self.debug_info = debug_info
-        self.hosts_path = write_hosts_file()
+        self.hosts_path = write_hosts_file(server_config)
+        self.server_ports = server_config.ports if server_config else {}
         self.command = None
         self.user_stylesheets = user_stylesheets if user_stylesheets else []
 
@@ -91,10 +95,16 @@ class ServoWebDriverBrowser(Browser):
         env = os.environ.copy()
         env["HOST_FILE"] = self.hosts_path
         env["RUST_BACKTRACE"] = "1"
+        env["EMULATOR_REVERSE_FORWARD_PORTS"] = ",".join(
+            str(port)
+            for _protocol, ports in self.server_ports.items()
+            for port in ports
+            if port
+        )
 
         debug_args, command = browser_command(
             self.binary,
-            [
+            self.binary_args + [
                 "--hard-fail",
                 "--webdriver", str(self.webdriver_port),
                 "about:blank",
@@ -151,9 +161,10 @@ class ServoWebDriverBrowser(Browser):
 
     def cleanup(self):
         self.stop()
-        shutil.rmtree(os.path.dirname(self.hosts_file))
+        os.remove(self.hosts_path)
 
     def executor_browser(self):
         assert self.webdriver_port is not None
         return ExecutorBrowser, {"webdriver_host": self.webdriver_host,
-                                 "webdriver_port": self.webdriver_port}
+                                 "webdriver_port": self.webdriver_port,
+                                 "init_timeout": self.init_timeout}
index 7d95d43..9482f2f 100644 (file)
@@ -1,8 +1,8 @@
 from .base import Browser, ExecutorBrowser, require_arg
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
-from ..executors.executorwebkit import WebKitDriverWdspecExecutor
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
+                                          SeleniumRefTestExecutor)  # noqa: F401
+from ..executors.executorwebkit import WebKitDriverWdspecExecutor  # noqa: F401
 from ..webdriver_server import WebKitDriverServer
 
 
@@ -24,20 +24,24 @@ def check_args(**kwargs):
     require_arg(kwargs, "webkit_port")
 
 
-def browser_kwargs(test_type, run_info_data, **kwargs):
+def browser_kwargs(test_type, run_info_data, config, **kwargs):
     return {"binary": kwargs["binary"],
             "webdriver_binary": kwargs["webdriver_binary"],
             "webdriver_args": kwargs.get("webdriver_args")}
 
 
-def capabilities_for_port(webkit_port, binary, binary_args):
+def capabilities_for_port(server_config, **kwargs):
     from selenium.webdriver import DesiredCapabilities
 
-    if webkit_port == "gtk":
+    if kwargs["webkit_port"] == "gtk":
         capabilities = dict(DesiredCapabilities.WEBKITGTK.copy())
         capabilities["webkitgtk:browserOptions"] = {
-            "binary": binary,
-            "args": binary_args
+            "binary": kwargs["binary"],
+            "args": kwargs.get("binary_args", []),
+            "certificates": [
+                {"host": server_config["browser_host"],
+                 "certificateFile": kwargs["host_cert_path"]}
+            ]
         }
         return capabilities
 
@@ -47,12 +51,10 @@ def capabilities_for_port(webkit_port, binary, binary_args):
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
-    capabilities = capabilities_for_port(kwargs["webkit_port"],
-                                         kwargs["binary"],
-                                         kwargs.get("binary_args", []))
-    executor_kwargs["capabilities"] = capabilities
+    executor_kwargs["capabilities"] = capabilities_for_port(server_config,
+                                                            **kwargs)
     return executor_kwargs
 
 
index 309174c..ba89fb9 100644 (file)
@@ -10,7 +10,6 @@ from mozlog import get_default_logger, handlers, proxy
 
 from wptlogging import LogLevelRewriter
 from wptserve.handlers import StringHandler
-from wptserve import sslutils
 
 here = os.path.split(__file__)[0]
 repo_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir, os.pardir))
@@ -19,7 +18,7 @@ serve = None
 
 
 def do_delayed_imports(logger, test_paths):
-    global serve, sslutils
+    global serve
 
     serve_root = serve_path(test_paths)
     sys.path.insert(0, serve_root)
@@ -42,34 +41,17 @@ def serve_path(test_paths):
     return test_paths["/"]["tests_path"]
 
 
-def get_ssl_kwargs(**kwargs):
-    if kwargs["ssl_type"] == "openssl":
-        args = {"openssl_binary": kwargs["openssl_binary"]}
-    elif kwargs["ssl_type"] == "pregenerated":
-        args = {"host_key_path": kwargs["host_key_path"],
-                "host_cert_path": kwargs["host_cert_path"],
-                "ca_cert_path": kwargs["ca_cert_path"]}
-    else:
-        args = {}
-    return args
-
-
-def ssl_env(logger, **kwargs):
-    ssl_env_cls = sslutils.environments[kwargs["ssl_type"]]
-    return ssl_env_cls(logger, **get_ssl_kwargs(**kwargs))
-
-
 class TestEnvironmentError(Exception):
     pass
 
 
 class TestEnvironment(object):
-    def __init__(self, test_paths, ssl_env, pause_after_test, debug_info, options, env_extras):
+    def __init__(self, test_paths, pause_after_test, debug_info, options, ssl_config, env_extras):
         """Context manager that owns the test environment i.e. the http and
         websockets servers"""
         self.test_paths = test_paths
-        self.ssl_env = ssl_env
         self.server = None
+        self.config_ctx = None
         self.config = None
         self.pause_after_test = pause_after_test
         self.test_server_port = options.pop("test_server_port", True)
@@ -80,14 +62,16 @@ class TestEnvironment(object):
         self.stash = serve.stash.StashServer()
         self.env_extras = env_extras
         self.env_extras_cms = None
-
+        self.ssl_config = ssl_config
 
     def __enter__(self):
+        self.config_ctx = self.build_config()
+
+        self.config = self.config_ctx.__enter__()
+
         self.stash.__enter__()
-        self.ssl_env.__enter__()
         self.cache_manager.__enter__()
 
-        self.config = self.load_config()
         self.setup_server_logging()
 
         assert self.env_extras_cms is None, (
@@ -101,7 +85,6 @@ class TestEnvironment(object):
             self.env_extras_cms.append(cm)
 
         self.servers = serve.start(self.config,
-                                   self.ssl_env,
                                    self.get_routes())
         if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
             self.ignore_interrupts()
@@ -119,8 +102,8 @@ class TestEnvironment(object):
         self.env_extras_cms = None
 
         self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
-        self.ssl_env.__exit__(exc_type, exc_val, exc_tb)
         self.stash.__exit__()
+        self.config_ctx.__exit__(exc_type, exc_val, exc_tb)
 
     def ignore_interrupts(self):
         signal.signal(signal.SIGINT, signal.SIG_IGN)
@@ -128,10 +111,10 @@ class TestEnvironment(object):
     def process_interrupts(self):
         signal.signal(signal.SIGINT, signal.SIG_DFL)
 
-    def load_config(self):
+    def build_config(self):
         override_path = os.path.join(serve_path(self.test_paths), "config.json")
 
-        config = serve.Config(override_ssl_env=self.ssl_env)
+        config = serve.ConfigBuilder()
 
         config.ports = {
             "http": [8000, 8001],
@@ -146,7 +129,10 @@ class TestEnvironment(object):
             config.update(override_obj)
 
         config.check_subdomains = False
-        config.ssl = {}
+
+        ssl_config = self.ssl_config.copy()
+        ssl_config["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
+        config.ssl = ssl_config
 
         if "browser_host" in self.options:
             config.browser_host = self.options["browser_host"]
@@ -155,7 +141,6 @@ class TestEnvironment(object):
             config.bind_address = self.options["bind_address"]
 
         config.server_host = self.options.get("server_host", None)
-        config.ssl["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
         config.doc_root = serve_path(self.test_paths)
 
         return config
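
With ssl_env gone, the resolved ssl settings travel as a plain ssl_config dict: TestEnvironment copies it, adds encrypt_after_connect, and assigns the result onto the ConfigBuilder, while browser products read the same data back through config.ssl_config (see the firefox.py and servo.py hunks above). A minimal sketch of that hand-off, using an example path and only the keys this patch actually touches:

    # Example input; a real run derives these values from the ssl-related
    # command line options rather than hard-coding a path.
    ssl_config = {"ca_cert_path": "/tmp/_certs/cacert.pem"}

    server_ssl = ssl_config.copy()
    server_ssl["encrypt_after_connect"] = False   # what build_config() adds above
    # config.ssl = server_ssl                     # assigned onto serve.ConfigBuilder()

    ca_certificate_path = ssl_config["ca_cert_path"]   # how browser_kwargs reads it back
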
index 24761b8..a580730 100644 (file)
@@ -1,3 +1,4 @@
+# flake8: noqa (not ideal, but nicer than adding noqa: F401 to every line!)
 from base import (executor_kwargs,
                   testharness_result_converter,
                   reftest_result_converter,
index 08030b3..1dc962c 100644 (file)
@@ -17,7 +17,8 @@ here = os.path.split(__file__)[0]
 extra_timeout = 5  # seconds
 
 
-def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
+def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                    **kwargs):
     timeout_multiplier = kwargs["timeout_multiplier"]
     if timeout_multiplier is None:
         timeout_multiplier = 1
@@ -61,12 +62,12 @@ class TestharnessResultConverter(object):
                   2: "TIMEOUT",
                   3: "NOTRUN"}
 
-    def __call__(self, test, result):
+    def __call__(self, test, result, extra=None):
         """Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
         result_url, status, message, stack, subtest_results = result
         assert result_url == test.url, ("Got results from %s, expected %s" %
-                                      (result_url, test.url))
-        harness_result = test.result_cls(self.harness_codes[status], message)
+                                        (result_url, test.url))
+        harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
         return (harness_result,
                 [test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
                  for st_name, st_status, st_message, st_stack in subtest_results])
@@ -76,8 +77,11 @@ testharness_result_converter = TestharnessResultConverter()
 
 
 def reftest_result_converter(self, test, result):
-    return (test.result_cls(result["status"], result["message"],
-                            extra=result.get("extra")), [])
+    return (test.result_cls(
+        result["status"],
+        result["message"],
+        extra=result.get("extra", {}),
+        stack=result.get("stack")), [])
 
 
 def pytest_result_converter(self, test, data):
@@ -243,7 +247,6 @@ class RefTestImplementation(object):
         return self.executor.logger
 
     def get_hash(self, test, viewport_size, dpi):
-        timeout = test.timeout * self.timeout_multiplier
         key = (test.url, viewport_size, dpi)
 
         if key not in self.screenshot_cache:
@@ -391,7 +394,7 @@ class WdspecRun(object):
         executor = threading.Thread(target=self._run)
         executor.start()
 
-        flag = self.result_flag.wait(self.timeout)
+        self.result_flag.wait(self.timeout)
         if self.result[1] is None:
             self.result = False, ("EXTERNAL-TIMEOUT", None)
 
@@ -533,7 +536,7 @@ class CallbackHandler(object):
                 raise ValueError("Unknown action %s" % action)
             try:
                 action_handler(payload)
-            except Exception as e:
+            except Exception:
                 self.logger.warning("Action %s failed" % action)
                 self.logger.warning(traceback.format_exc())
                 self._send_message("complete", "failure")
index bff509c..9df4508 100644 (file)
@@ -13,19 +13,15 @@ pytestrunner = None
 here = os.path.join(os.path.split(__file__)[0])
 
 from .base import (CallbackHandler,
-                   ExecutorException,
                    RefTestExecutor,
                    RefTestImplementation,
-                   TestExecutor,
                    TestharnessExecutor,
                    WdspecExecutor,
-                   WdspecRun,
                    WebDriverProtocol,
                    extra_timeout,
-                   testharness_result_converter,
-                   reftest_result_converter,
                    strip_server)
-from .protocol import (BaseProtocolPart,
+from .protocol import (AssertsProtocolPart,
+                       BaseProtocolPart,
                        TestharnessProtocolPart,
                        PrefsProtocolPart,
                        Protocol,
@@ -33,7 +29,8 @@ from .protocol import (BaseProtocolPart,
                        SelectorProtocolPart,
                        ClickProtocolPart,
                        SendKeysProtocolPart,
-                       TestDriverProtocolPart)
+                       TestDriverProtocolPart,
+                       CoverageProtocolPart)
 from ..testrunner import Stop
 from ..webdriver_server import GeckoDriverServer
 
@@ -293,6 +290,50 @@ class MarionetteStorageProtocolPart(StorageProtocolPart):
             self.marionette.execute_script(script)
 
 
+class MarionetteAssertsProtocolPart(AssertsProtocolPart):
+    def setup(self):
+        self.assert_count = {"chrome": 0, "content": 0}
+        self.chrome_assert_count = 0
+        self.marionette = self.parent.marionette
+
+    def get(self):
+        script = """
+        debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
+        if (debug.isDebugBuild) {
+          return debug.assertionCount;
+        }
+        return 0;
+        """
+
+        def get_count(context, **kwargs):
+            try:
+                context_count = self.marionette.execute_script(script, **kwargs)
+                if context_count:
+                    self.parent.logger.info("Got %s assert count %s" % (context, context_count))
+                    test_count = context_count - self.assert_count[context]
+                    self.assert_count[context] = context_count
+                    return test_count
+            except errors.NoSuchWindowException:
+                # If the window was already closed
+                self.parent.logger.warning("Failed to get assertion count; window was closed")
+            except (errors.MarionetteException, socket.error):
+                # This usually happens if the process crashed
+                pass
+
+        counts = []
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            counts.append(get_count("chrome"))
+        if self.parent.e10s:
+            counts.append(get_count("content", sandbox="system"))
+
+        counts = [item for item in counts if item is not None]
+
+        if not counts:
+            return None
+
+        return sum(counts)
+
+
 class MarionetteSelectorProtocolPart(SelectorProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
@@ -308,6 +349,7 @@ class MarionetteClickProtocolPart(ClickProtocolPart):
     def element(self, element):
         return element.click()
 
+
 class MarionetteSendKeysProtocolPart(SendKeysProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
@@ -315,6 +357,7 @@ class MarionetteSendKeysProtocolPart(SendKeysProtocolPart):
     def send_keys(self, element, keys):
         return element.send_keys(keys)
 
+
 class MarionetteTestDriverProtocolPart(TestDriverProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
@@ -329,6 +372,58 @@ class MarionetteTestDriverProtocolPart(TestDriverProtocolPart):
         self.parent.base.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
 
 
+class MarionetteCoverageProtocolPart(CoverageProtocolPart):
+    def setup(self):
+        self.marionette = self.parent.marionette
+
+        if not self.parent.ccov:
+            self.is_enabled = False
+            return
+
+        script = """
+            ChromeUtils.import("chrome://marionette/content/PerTestCoverageUtils.jsm");
+            return PerTestCoverageUtils.enabled;
+            """
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            self.is_enabled = self.marionette.execute_script(script)
+
+    def reset(self):
+        script = """
+            var callback = arguments[arguments.length - 1];
+
+            ChromeUtils.import("chrome://marionette/content/PerTestCoverageUtils.jsm");
+            PerTestCoverageUtils.beforeTest().then(callback, callback);
+            """
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            try:
+                error = self.marionette.execute_async_script(script)
+                if error is not None:
+                    raise Exception('Failure while resetting counters: %s' % json.dumps(error))
+            except (errors.MarionetteException, socket.error):
+                # This usually happens if the process crashed
+                pass
+
+    def dump(self):
+        if len(self.marionette.window_handles):
+            handle = self.marionette.window_handles[0]
+            self.marionette.switch_to_window(handle)
+
+        script = """
+            var callback = arguments[arguments.length - 1];
+
+            ChromeUtils.import("chrome://marionette/content/PerTestCoverageUtils.jsm");
+            PerTestCoverageUtils.afterTest().then(callback, callback);
+            """
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            try:
+                error = self.marionette.execute_async_script(script)
+                if error is not None:
+                    raise Exception('Failure while dumping counters: %s' % json.dumps(error))
+            except (errors.MarionetteException, socket.error):
+                # This usually happens if the process crashed
+                pass
+
+
 class MarionetteProtocol(Protocol):
     implements = [MarionetteBaseProtocolPart,
                   MarionetteTestharnessProtocolPart,
@@ -337,9 +432,11 @@ class MarionetteProtocol(Protocol):
                   MarionetteSelectorProtocolPart,
                   MarionetteClickProtocolPart,
                   MarionetteSendKeysProtocolPart,
-                  MarionetteTestDriverProtocolPart]
+                  MarionetteTestDriverProtocolPart,
+                  MarionetteAssertsProtocolPart,
+                  MarionetteCoverageProtocolPart]
 
-    def __init__(self, executor, browser, capabilities=None, timeout_multiplier=1):
+    def __init__(self, executor, browser, capabilities=None, timeout_multiplier=1, e10s=True, ccov=False):
         do_delayed_imports()
 
         super(MarionetteProtocol, self).__init__(executor, browser)
@@ -348,11 +445,13 @@ class MarionetteProtocol(Protocol):
         self.capabilities = capabilities
         self.timeout_multiplier = timeout_multiplier
         self.runner_handle = None
+        self.e10s = e10s
+        self.ccov = ccov
 
     def connect(self):
         self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
         startup_timeout = marionette.Marionette.DEFAULT_STARTUP_TIMEOUT * self.timeout_multiplier
-        self.marionette = marionette.Marionette(host='localhost',
+        self.marionette = marionette.Marionette(host='127.0.0.1',
                                                 port=self.marionette_port,
                                                 socket_timeout=None,
                                                 startup_timeout=startup_timeout)
@@ -368,7 +467,7 @@ class MarionetteProtocol(Protocol):
                     raise
 
         self.logger.debug("Starting Marionette session")
-        self.marionette.start_session()
+        self.marionette.start_session(self.capabilities)
         self.logger.debug("Marionette session started")
 
     def after_connect(self):
@@ -437,15 +536,18 @@ class ExecuteAsyncScriptRun(object):
             self.logger.error("Lost marionette connection before starting test")
             return Stop
 
-        executor = threading.Thread(target = self._run)
-        executor.start()
-
         if timeout is not None:
             wait_timeout = timeout + 2 * extra_timeout
         else:
             wait_timeout = None
 
-        flag = self.result_flag.wait(wait_timeout)
+        timer = threading.Timer(wait_timeout, self._timeout)
+        timer.start()
+
+        self._run()
+
+        self.result_flag.wait()
+        timer.cancel()
 
         if self.result == (None, None):
             self.logger.debug("Timed out waiting for a result")
@@ -481,23 +583,27 @@ class ExecuteAsyncScriptRun(object):
         finally:
             self.result_flag.set()
 
+    def _timeout(self):
+        self.result = False, ("EXTERNAL-TIMEOUT", None)
+        self.result_flag.set()
+
 
 class MarionetteTestharnessExecutor(TestharnessExecutor):
     supports_testdriver = True
 
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  close_after_done=True, debug_info=None, capabilities=None,
-                 **kwargs):
+                 debug=False, ccov=False, **kwargs):
         """Marionette-based executor for testharness.js tests"""
         TestharnessExecutor.__init__(self, browser, server_config,
                                      timeout_multiplier=timeout_multiplier,
                                      debug_info=debug_info)
-
-        self.protocol = MarionetteProtocol(self, browser, capabilities, timeout_multiplier)
+        self.protocol = MarionetteProtocol(self, browser, capabilities, timeout_multiplier, kwargs["e10s"], ccov)
         self.script = open(os.path.join(here, "testharness_webdriver.js")).read()
         self.script_resume = open(os.path.join(here, "testharness_webdriver_resume.js")).read()
         self.close_after_done = close_after_done
         self.window_id = str(uuid.uuid4())
+        self.debug = debug
 
         self.original_pref_values = {}
 
@@ -522,10 +628,23 @@ class MarionetteTestharnessExecutor(TestharnessExecutor):
                                               self.protocol,
                                               self.test_url(test),
                                               timeout).run()
+        # The format of data depends on whether the test ran to completion or not.
+        # For asserts we only care that, if it didn't complete, the status is in
+        # the first field.
+        status = None
+        if not success:
+            status = data[0]
+
+        extra = None
+        if self.debug and (success or status not in ("CRASH", "INTERNAL-ERROR")):
+            assertion_count = self.protocol.asserts.get()
+            if assertion_count is not None:
+                extra = {"assertion_count": assertion_count}
+
         if success:
-            return self.convert_result(test, data)
+            return self.convert_result(test, data, extra=extra)
 
-        return (test.result_cls(*data), [])
+        return (test.result_cls(extra=extra, *data), [])
 
     def do_testharness(self, protocol, url, timeout):
         protocol.base.execute_script("if (window.win) {window.win.close()}")
@@ -536,6 +655,9 @@ class MarionetteTestharnessExecutor(TestharnessExecutor):
         else:
             timeout_ms = "null"
 
+        if self.protocol.coverage.is_enabled:
+            self.protocol.coverage.reset()
+
         format_map = {"abs_url": url,
                       "url": strip_server(url),
                       "window_id": self.window_id,
@@ -558,6 +680,10 @@ class MarionetteTestharnessExecutor(TestharnessExecutor):
             done, rv = handler(result)
             if done:
                 break
+
+        if self.protocol.coverage.is_enabled:
+            self.protocol.coverage.dump()
+
         return rv
 
 
@@ -565,8 +691,8 @@ class MarionetteRefTestExecutor(RefTestExecutor):
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  screenshot_cache=None, close_after_done=True,
                  debug_info=None, reftest_internal=False,
-                 reftest_screenshot="unexpected",
-                 group_metadata=None, capabilities=None, **kwargs):
+                 reftest_screenshot="unexpected", ccov=False,
+                 group_metadata=None, capabilities=None, debug=False, **kwargs):
         """Marionette-based executor for reftests"""
         RefTestExecutor.__init__(self,
                                  browser,
@@ -575,7 +701,8 @@ class MarionetteRefTestExecutor(RefTestExecutor):
                                  timeout_multiplier=timeout_multiplier,
                                  debug_info=debug_info)
         self.protocol = MarionetteProtocol(self, browser, capabilities,
-                                           timeout_multiplier)
+                                           timeout_multiplier, kwargs["e10s"],
+                                           ccov)
         self.implementation = (InternalRefTestImplementation
                                if reftest_internal
                                else RefTestImplementation)(self)
@@ -586,6 +713,7 @@ class MarionetteRefTestExecutor(RefTestExecutor):
         self.has_window = False
         self.original_pref_values = {}
         self.group_metadata = group_metadata
+        self.debug = debug
 
         with open(os.path.join(here, "reftest.js")) as f:
             self.script = f.read()
@@ -625,7 +753,20 @@ class MarionetteRefTestExecutor(RefTestExecutor):
                 self.protocol.base.set_window(self.protocol.marionette.window_handles[-1])
                 self.has_window = True
 
+        if self.protocol.coverage.is_enabled:
+            self.protocol.coverage.reset()
+
         result = self.implementation.run_test(test)
+
+        if self.protocol.coverage.is_enabled:
+            self.protocol.coverage.dump()
+
+        if self.debug:
+            assertion_count = self.protocol.asserts.get()
+            if "extra" not in result:
+                result["extra"] = {}
+            result["extra"]["assertion_count"] = assertion_count
+
         return self.convert_result(test, result)
 
     def screenshot(self, test, viewport_size, dpi):
@@ -675,9 +816,6 @@ class InternalRefTestImplementation(object):
         self.executor.protocol.marionette._send_message("reftest:setup", data)
 
     def run_test(self, test):
-        viewport_size = test.viewport_size
-        dpi = test.dpi
-
         references = self.get_references(test)
         rv = self.executor.protocol.marionette._send_message("reftest:run",
                                                              {"test": self.executor.test_url(test),
diff --git a/WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/executorsafari.py b/WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/executors/executorsafari.py
new file mode 100644 (file)
index 0000000..ed01f4d
--- /dev/null
@@ -0,0 +1,10 @@
+from ..webdriver_server import SafariDriverServer
+from .base import WdspecExecutor, WebDriverProtocol
+
+
+class SafariDriverProtocol(WebDriverProtocol):
+    server_cls = SafariDriverServer
+
+
+class SafariDriverWdspecExecutor(WdspecExecutor):
+    protocol_cls = SafariDriverProtocol
index fb79cf2..caa9714 100644 (file)
@@ -1,22 +1,16 @@
 import base64
-import hashlib
-import httplib
 import json
 import os
 import subprocess
 import tempfile
 import threading
-import traceback
-import urlparse
 import uuid
-from collections import defaultdict
 
 from mozprocess import ProcessHandler
 
 from serve.serve import make_hosts_file
 
-from .base import (ExecutorException,
-                   ConnectionlessProtocol,
+from .base import (ConnectionlessProtocol,
                    RefTestImplementation,
                    testharness_result_converter,
                    reftest_result_converter,
@@ -24,9 +18,7 @@ from .base import (ExecutorException,
                    WebDriverProtocol)
 from .process import ProcessTestExecutor
 from ..browsers.base import browser_command
-from ..wpttest import WdspecResult, WdspecSubtestResult
 from ..webdriver_server import ServoDriverServer
-from .executormarionette import WdspecRun
 
 pytestrunner = None
 webdriver = None
index 626c987..d015e77 100644 (file)
@@ -6,13 +6,16 @@ import time
 import traceback
 
 from .base import (Protocol,
+                   BaseProtocolPart,
                    RefTestExecutor,
                    RefTestImplementation,
                    TestharnessExecutor,
                    strip_server)
 from ..testrunner import Stop
+from ..webdriver_server import wait_for_service
 
 webdriver = None
+ServoCommandExtensions = None
 
 here = os.path.join(os.path.split(__file__)[0])
 
@@ -23,21 +26,77 @@ def do_delayed_imports():
     global webdriver
     import webdriver
 
+    global ServoCommandExtensions
+
+    class ServoCommandExtensions(object):
+        def __init__(self, session):
+            self.session = session
+
+        @webdriver.client.command
+        def get_prefs(self, *prefs):
+            body = {"prefs": list(prefs)}
+            return self.session.send_session_command("POST", "servo/prefs/get", body)
+
+        @webdriver.client.command
+        def set_prefs(self, prefs):
+            body = {"prefs": prefs}
+            return self.session.send_session_command("POST", "servo/prefs/set", body)
+
+        @webdriver.client.command
+        def reset_prefs(self, *prefs):
+            body = {"prefs": list(prefs)}
+            return self.session.send_session_command("POST", "servo/prefs/reset", body)
+
+        def change_prefs(self, old_prefs, new_prefs):
+            # Servo interprets reset with an empty list as reset everything
+            if old_prefs:
+                self.reset_prefs(*old_prefs.keys())
+            self.set_prefs({k: parse_pref_value(v) for k, v in new_prefs.items()})
+
+
+# See parse_pref_from_command_line() in components/config/opts.rs
+def parse_pref_value(value):
+    if value == "true":
+        return True
+    if value == "false":
+        return False
+    try:
+        return float(value)
+    except ValueError:
+        return value
+
+
+class ServoBaseProtocolPart(BaseProtocolPart):
+    def execute_script(self, script, async=False):
+        pass
+
+    def set_timeout(self, timeout):
+        pass
+
+    def wait(self):
+        pass
+
+    def set_window(self, handle):
+        pass
+
 
 class ServoWebDriverProtocol(Protocol):
+    implements = [ServoBaseProtocolPart]
+
     def __init__(self, executor, browser, capabilities, **kwargs):
         do_delayed_imports()
         Protocol.__init__(self, executor, browser)
         self.capabilities = capabilities
         self.host = browser.webdriver_host
         self.port = browser.webdriver_port
+        self.init_timeout = browser.init_timeout
         self.session = None
 
     def connect(self):
         """Connect to browser via WebDriver."""
-        url = "http://%s:%d" % (self.host, self.port)
-        self.session = webdriver.Session(self.host, self.port,
-                                         extension=webdriver.servo.ServoCommandExtensions)
+        wait_for_service((self.host, self.port), timeout=self.init_timeout)
+
+        self.session = webdriver.Session(self.host, self.port, extension=ServoCommandExtensions)
         self.session.start()
 
     def after_connect(self):
@@ -71,11 +130,6 @@ class ServoWebDriverProtocol(Protocol):
                 self.logger.error(traceback.format_exc(e))
                 break
 
-    def on_environment_change(self, old_environment, new_environment):
-        #Unset all the old prefs
-        self.session.extension.reset_prefs(*old_environment.get("prefs", {}).keys())
-        self.session.extension.set_prefs(new_environment.get("prefs", {}))
-
 
 class ServoWebDriverRun(object):
     def __init__(self, func, session, url, timeout, current_timeout=None):
@@ -175,6 +229,12 @@ class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
         session.back()
         return result
 
+    def on_environment_change(self, new_environment):
+        self.protocol.session.extension.change_prefs(
+            self.last_environment.get("prefs", {}),
+            new_environment.get("prefs", {})
+        )
+
 
 class TimeoutError(Exception):
     pass
@@ -241,3 +301,9 @@ class ServoWebDriverRefTestExecutor(RefTestExecutor):
         session.url = url
         session.execute_async_script(self.wait_script)
         return session.screenshot()
+
+    def on_environment_change(self, new_environment):
+        self.protocol.session.extension.change_prefs(
+            self.last_environment.get("prefs", {}),
+            new_environment.get("prefs", {})
+        )
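
For illustration, the parse_pref_value helper added above follows Servo's command-line pref parsing, so string values coming from test metadata map roughly as (illustrative interpreter session):

    >>> parse_pref_value("true")
    True
    >>> parse_pref_value("false")
    False
    >>> parse_pref_value("2")
    2.0
    >>> parse_pref_value("auto")   # neither boolean nor numeric
    'auto'
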
index 3c938f0..71fc3c9 100644 (file)
@@ -290,3 +290,33 @@ class TestDriverProtocolPart(ProtocolPart):
                            previous command succeeded.
         :param str message: Additional data to add to the message."""
         pass
+
+
+class AssertsProtocolPart(ProtocolPart):
+    """ProtocolPart that implements the functionality required to get a count of non-fatal
+    assertions triggered"""
+    __metaclass__ = ABCMeta
+
+    name = "asserts"
+
+    @abstractmethod
+    def get(self):
+        """Get a count of assertions since the last browser start"""
+        pass
+
+
+class CoverageProtocolPart(ProtocolPart):
+    """Protocol part for collecting per-test coverage data."""
+    __metaclass__ = ABCMeta
+
+    name = "coverage"
+
+    @abstractmethod
+    def reset(self):
+        """Reset coverage counters"""
+        pass
+
+    @abstractmethod
+    def dump(self):
+        """Dump coverage counters"""
+        pass
index c226027..61f894f 100644 (file)
@@ -1,7 +1,9 @@
+var callback = arguments[arguments.length - 1];
+
 function test(x) {
   if (!root.classList.contains("reftest-wait")) {
     observer.disconnect();
-    marionetteScriptFinished();
+    callback();
   }
 }
 
index 22675f0..7f00050 100644 (file)
@@ -17,7 +17,9 @@ window.setMessageListener(function(event) {
 
 window.win = window.open("%(abs_url)s", "%(window_id)s");
 
-window.timer = setTimeout(function() {
-  window.win.timeout();
-  window.win.close();
-}, %(timeout)s);
+if (%(timeout)s != null) {
+  window.timer = setTimeout(function() {
+    window.win.timeout();
+    window.win.close();
+  }, %(timeout)s);
+}
index 79fca42..a370f5f 100644 (file)
@@ -2,9 +2,10 @@ import ctypes
 import logging
 import os
 import platform
+import plistlib
 
 from shutil import copy2, rmtree
-from subprocess import call
+from subprocess import call, check_output
 
 HERE = os.path.split(__file__)[0]
 SYSTEM = platform.system().lower()
@@ -71,9 +72,19 @@ class FontInstaller(object):
         if not os.path.exists(self.font_dir):
             os.makedirs(self.font_dir)
             self.created_dir = True
-        if not os.path.exists(os.path.join(self.font_dir, font_name)):
+        installed_font_path = os.path.join(self.font_dir, font_name)
+        if not os.path.exists(installed_font_path):
             copy2(font_path, self.font_dir)
-        return True
+
+        # Per https://github.com/web-platform-tests/results-collection/issues/218
+        # installing Ahem on macOS is flaky, so check whether it was actually installed
+        fonts = check_output(['/usr/sbin/system_profiler', '-xml', 'SPFontsDataType'])
+        fonts = plistlib.readPlistFromString(fonts)
+        assert len(fonts) == 1
+        for font in fonts[0]['_items']:
+            if font['path'] == installed_font_path:
+                return True
+        return False
 
     def install_windows_font(self, _, font_path):
         hwnd_broadcast = 0xFFFF
index 24d0653..1ea09db 100644 (file)
@@ -11,7 +11,8 @@ class WptreportFormatter(BaseFormatter):
         self.results = {}
 
     def suite_start(self, data):
-        self.results['run_info'] = data['run_info']
+        if 'run_info' in data:
+            self.results['run_info'] = data['run_info']
         self.results['time_start'] = data['time']
 
     def suite_end(self, data):
@@ -33,6 +34,10 @@ class WptreportFormatter(BaseFormatter):
             }
         return self.raw_results[test_name]
 
+    def test_start(self, data):
+        test = self.find_or_create_test(data)
+        test["start_time"] = data["time"]
+
     def create_subtest(self, data):
         test = self.find_or_create_test(data)
         subtest_name = data["subtest"]
@@ -49,11 +54,25 @@ class WptreportFormatter(BaseFormatter):
     def test_status(self, data):
         subtest = self.create_subtest(data)
         subtest["status"] = data["status"]
+        if "expected" in data:
+            subtest["expected"] = data["expected"]
         if "message" in data:
             subtest["message"] = data["message"]
 
     def test_end(self, data):
         test = self.find_or_create_test(data)
+        start_time = test.pop("start_time")
+        test["duration"] = data["time"] - start_time
         test["status"] = data["status"]
+        if "expected" in data:
+            test["expected"] = data["expected"]
         if "message" in data:
             test["message"] = data["message"]
+
+    def assertion_count(self, data):
+        test = self.find_or_create_test(data)
+        test["asserts"] = {
+            "count": data["count"],
+            "min": data["min_expected"],
+            "max": data["max_expected"]
+        }
index 5d57b70..831f537 100644 (file)
@@ -33,6 +33,14 @@ def bool_prop(name, node):
         return None
 
 
+def int_prop(name, node):
+    """Boolean property"""
+    try:
+        return int(node.get(name))
+    except KeyError:
+        return None
+
+
 def tags(node):
     """Set of tags that have been applied to the test"""
     try:
@@ -54,13 +62,26 @@ def prefs(node):
     try:
         node_prefs = node.get("prefs")
         if type(node_prefs) in (str, unicode):
-            prefs = {value(node_prefs)}
-        rv = dict(value(item) for item in node_prefs)
+            rv = dict(value(node_prefs))
+        else:
+            rv = dict(value(item) for item in node_prefs)
     except KeyError:
         rv = {}
     return rv
 
 
+def lsan_allowed(node):
+    try:
+        node_items = node.get("lsan-allowed")
+        if isinstance(node_items, (str, unicode)):
+            rv = {node_items}
+        else:
+            rv = set(node_items)
+    except KeyError:
+        rv = set()
+    return rv
+
+
 class ExpectedManifest(ManifestItem):
     def __init__(self, name, test_path, url_base):
         """Object representing all the tests in a particular manifest
@@ -116,6 +137,14 @@ class ExpectedManifest(ManifestItem):
         return bool_prop("leaks", self)
 
     @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
+    @property
     def tags(self):
         return tags(self)
 
@@ -123,6 +152,10 @@ class ExpectedManifest(ManifestItem):
     def prefs(self):
         return prefs(self)
 
+    @property
+    def lsan_allowed(self):
+        return lsan_allowed(self)
+
 
 class DirectoryManifest(ManifestItem):
     @property
@@ -138,6 +171,14 @@ class DirectoryManifest(ManifestItem):
         return bool_prop("leaks", self)
 
     @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
+    @property
     def tags(self):
         return tags(self)
 
@@ -145,6 +186,10 @@ class DirectoryManifest(ManifestItem):
     def prefs(self):
         return prefs(self)
 
+    @property
+    def lsan_allowed(self):
+        return lsan_allowed(self)
+
 
 class TestNode(ManifestItem):
     def __init__(self, name):
@@ -187,6 +232,14 @@ class TestNode(ManifestItem):
         return bool_prop("leaks", self)
 
     @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
+    @property
     def tags(self):
         return tags(self)
 
@@ -194,6 +247,10 @@ class TestNode(ManifestItem):
     def prefs(self):
         return prefs(self)
 
+    @property
+    def lsan_allowed(self):
+        return lsan_allowed(self)
+
     def append(self, node):
         """Add a subtest to the current test
 
@@ -245,6 +302,7 @@ def get_manifest(metadata_root, test_path, url_base, run_info):
     except IOError:
         return None
 
+
 def get_dir_manifest(path, run_info):
     """Get the ExpectedManifest for a particular test path, or None if there is no
     metadata stored for that test path.
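
Taken together, the new keys read by int_prop and lsan_allowed let expectation files carry assertion bounds and LSAN allowances alongside the existing properties. A rough sketch of such a file (hypothetical test and symbol names, in the wptmanifest syntax these properties are parsed from) might look like:

    [flexbox-001.html]
      expected: FAIL
      min-asserts: 1
      max-asserts: 3
      lsan-allowed: [MakeFrame]
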
index 1d3c173..1f8ba77 100644 (file)
@@ -1,3 +1,4 @@
+import itertools
 import os
 import urlparse
 from collections import namedtuple, defaultdict
@@ -35,7 +36,11 @@ class ConditionError(Exception):
         self.cond = cond
 
 
-Result = namedtuple("Result", ["run_info", "status"])
+class UpdateError(Exception):
+    pass
+
+
+Value = namedtuple("Value", ["run_info", "value"])
 
 
 def data_cls_getter(output_node, visited_node):
@@ -74,6 +79,9 @@ class ExpectedManifest(ManifestItem):
         self.modified = False
         self.boolean_properties = boolean_properties
         self.property_order = property_order
+        self.update_properties = {
+            "lsan": LsanUpdate(self),
+        }
 
     def append(self, child):
         ManifestItem.append(self, child)
@@ -90,7 +98,7 @@ class ExpectedManifest(ManifestItem):
 
         :param test_id: The id of the test to look up"""
 
-        return self.child_map[test_id]
+        return self.child_map.get(test_id)
 
     def has_test(self, test_id):
         """Boolean indicating whether the current test has a known child test
@@ -105,6 +113,19 @@ class ExpectedManifest(ManifestItem):
         return urlparse.urljoin(self.url_base,
                                 "/".join(self.test_path.split(os.path.sep)))
 
+    def set_lsan(self, run_info, result):
+        """Set the result of the test in a particular run
+
+        :param run_info: Dictionary of run_info parameters corresponding
+                         to this run
+        :param result: Lsan violations detected"""
+
+        self.update_properties["lsan"].set(run_info, result)
+
+    def coalesce_properties(self, stability):
+        for prop_update in self.update_properties.itervalues():
+            prop_update.coalesce(stability)
+
 
 class TestNode(ManifestItem):
     def __init__(self, node):
@@ -113,12 +134,14 @@ class TestNode(ManifestItem):
         :param node: AST node associated with the test"""
 
         ManifestItem.__init__(self, node)
-        self.updated_expected = []
-        self.new_expected = []
-        self.new_disabled = False
         self.subtests = {}
-        self.default_status = None
         self._from_file = True
+        self.new_disabled = False
+        self.update_properties = {
+            "expected": ExpectedUpdate(self),
+            "max-asserts": MaxAssertsUpdate(self),
+            "min-asserts": MinAssertsUpdate(self)
+        }
 
     @classmethod
     def create(cls, test_id):
@@ -128,7 +151,7 @@ class TestNode(ManifestItem):
         :param test_id: The id of the test"""
 
         url = test_id
-        name = url.split("/")[-1]
+        name = url.rsplit("/", 1)[1]
         node = DataNode(name)
         self = cls(node)
 
@@ -145,7 +168,6 @@ class TestNode(ManifestItem):
     @property
     def test_type(self):
         """The type of the test represented by this TestNode"""
-
         return self.get("type", None)
 
     @property
@@ -168,25 +190,114 @@ class TestNode(ManifestItem):
                          to this run
         :param result: Status of the test in this run"""
 
-        if self.default_status is not None:
-            assert self.default_status == result.default_expected
+        self.update_properties["expected"].set(run_info, result)
+
+    def set_asserts(self, run_info, count):
+        """Set the assert count of a test
+
+        """
+        self.update_properties["min-asserts"].set(run_info, count)
+        self.update_properties["max-asserts"].set(run_info, count)
+
+    def _add_key_value(self, node, values):
+        ManifestItem._add_key_value(self, node, values)
+        if node.data in self.update_properties:
+            new_updated = []
+            self.update_properties[node.data].updated = new_updated
+            for value in values:
+                new_updated.append((value, []))
+
+    def clear(self, key):
+        """Clear all the expected data for this test and all of its subtests"""
+
+        self.updated = []
+        if key in self._data:
+            for child in self.node.children:
+                if (isinstance(child, KeyValueNode) and
+                    child.data == key):
+                    child.remove()
+                    del self._data[key]
+                    break
+
+        for subtest in self.subtests.itervalues():
+            subtest.clear(key)
+
+    def append(self, node):
+        child = ManifestItem.append(self, node)
+        self.subtests[child.name] = child
+
+    def get_subtest(self, name):
+        """Return a SubtestNode corresponding to a particular subtest of
+        the current test, creating a new one if no subtest with that name
+        already exists.
+
+        :param name: Name of the subtest"""
+
+        if name in self.subtests:
+            return self.subtests[name]
         else:
-            self.default_status = result.default_expected
+            subtest = SubtestNode.create(name)
+            self.append(subtest)
+            return subtest
+
+    def coalesce_properties(self, stability):
+        for prop_update in self.update_properties.itervalues():
+            prop_update.coalesce(stability)
+
+
+class SubtestNode(TestNode):
+    def __init__(self, node):
+        assert isinstance(node, DataNode)
+        TestNode.__init__(self, node)
+
+    @classmethod
+    def create(cls, name):
+        node = DataNode(name)
+        self = cls(node)
+        return self
+
+    @property
+    def is_empty(self):
+        if self._data:
+            return False
+        return True
+
+
+class PropertyUpdate(object):
+    property_name = None
+    cls_default_value = None
+    value_type = None
+
+    def __init__(self, node):
+        self.node = node
+        self.updated = []
+        self.new = []
+        self.default_value = self.cls_default_value
+
+    def set(self, run_info, in_value):
+        self.check_default(in_value)
+        value = self.get_value(in_value)
 
         # Add this result to the list of results satisfying
         # any condition in the list of updated results it matches
-        for (cond, values) in self.updated_expected:
+        for (cond, values) in self.updated:
             if cond(run_info):
-                values.append(Result(run_info, result.status))
-                if result.status != cond.value:
-                    self.root.modified = True
+                values.append(Value(run_info, value))
+                if value != cond.value_as(self.value_type):
+                    self.node.root.modified = True
                 break
         else:
             # We didn't find a previous value for this
-            self.new_expected.append(Result(run_info, result.status))
-            self.root.modified = True
+            self.new.append(Value(run_info, value))
+            self.node.root.modified = True
+
+    def check_default(self, result):
+        return
+
+    def get_value(self, in_value):
+        return in_value
 
-    def coalesce_expected(self, stability=None):
+    def coalesce(self, stability=None):
         """Update the underlying manifest AST for this test based on all the
         added results.
 
@@ -202,149 +313,247 @@ class TestNode(ManifestItem):
         """
 
         try:
-            unconditional_status = self.get("expected")
+            unconditional_value = self.node.get(self.property_name)
+            if self.value_type:
+                unconditional_value = self.value_type(unconditional_value)
         except KeyError:
-            unconditional_status = self.default_status
+            unconditional_value = self.default_value
 
-        for conditional_value, results in self.updated_expected:
+        for conditional_value, results in self.updated:
             if not results:
                 # The conditional didn't match anything in these runs so leave it alone
                 pass
-            elif all(results[0].status == result.status for result in results):
+            elif all(results[0].value == result.value for result in results):
                 # All the new values for this conditional matched, so update the node
                 result = results[0]
-                if (result.status == unconditional_status and
+                if (result.value == unconditional_value and
                     conditional_value.condition_node is not None):
-                    if "expected" in self:
-                        self.remove_value("expected", conditional_value)
+                    if self.property_name in self.node:
+                        self.node.remove_value(self.property_name, conditional_value)
                 else:
-                    conditional_value.value = result.status
+                    conditional_value.value = self.update_value(conditional_value.value_as(self.value_type),
+                                                                result.value)
             elif conditional_value.condition_node is not None:
                 # Blow away the existing condition and rebuild from scratch
                 # This isn't sure to work if we have a conditional later that matches
                 # these values too, but we can hope, verify that we get the results
                 # we expect, and if not let a human sort it out
-                self.remove_value("expected", conditional_value)
-                self.new_expected.extend(results)
+                self.node.remove_value(self.property_name, conditional_value)
+                self.new.extend(results)
             elif conditional_value.condition_node is None:
-                self.new_expected.extend(result for result in results
-                                         if result.status != unconditional_status)
+                self.new.extend(result for result in results
+                                if result.value != unconditional_value)
 
-        # It is an invariant that nothing in new_expected matches an existing
+        # It is an invariant that nothing in new matches an existing
         # condition except for the default condition
-
-        if self.new_expected:
-            if all(self.new_expected[0].status == result.status
-                   for result in self.new_expected) and not self.updated_expected:
-                status = self.new_expected[0].status
-                if status != self.default_status:
-                    self.set("expected", status, condition=None)
+        if self.new:
+            update_default, new_default_value = self.update_default()
+            if update_default:
+                if new_default_value != self.default_value:
+                    self.node.set(self.property_name,
+                                  self.update_value(unconditional_value, new_default_value),
+                                  condition=None)
             else:
                 try:
-                    conditionals = group_conditionals(
-                        self.new_expected,
-                        property_order=self.root.property_order,
-                        boolean_properties=self.root.boolean_properties)
-                except ConditionError as e:
-                    if stability is not None:
-                        self.set("disabled", stability or "unstable", e.cond.children[0])
-                        self.new_disabled = True
-                    else:
-                        print "Conflicting test results for %s, cannot update" % self.root.test_path
-                    return
-                for conditional_node, status in conditionals:
-                    if status != unconditional_status:
-                        self.set("expected", status, condition=conditional_node.children[0])
-
-        if ("expected" in self._data and
-            len(self._data["expected"]) > 0 and
-            self._data["expected"][-1].condition_node is None and
-            self._data["expected"][-1].value == self.default_status):
-
-            self.remove_value("expected", self._data["expected"][-1])
-
-        if ("expected" in self._data and
-            len(self._data["expected"]) == 0):
-            for child in self.node.children:
-                if (isinstance(child, KeyValueNode) and
-                    child.data == "expected"):
-                    child.remove()
-                    break
+                    self.add_new(unconditional_value, stability)
+                except UpdateError as e:
+                    print("%s for %s, cannot update %s" % (e, self.node.root.test_path,
+                                                           self.property_name))
 
-    def _add_key_value(self, node, values):
-        ManifestItem._add_key_value(self, node, values)
-        if node.data == "expected":
-            self.updated_expected = []
-            for value in values:
-                self.updated_expected.append((value, []))
+        # Remove cases where the value matches the default
+        if (self.property_name in self.node._data and
+            len(self.node._data[self.property_name]) > 0 and
+            self.node._data[self.property_name][-1].condition_node is None and
+            self.node._data[self.property_name][-1].value_as(self.value_type) == self.default_value):
 
-    def clear_expected(self):
-        """Clear all the expected data for this test and all of its subtests"""
+            self.node.remove_value(self.property_name, self.node._data[self.property_name][-1])
 
-        self.updated_expected = []
-        if "expected" in self._data:
+        # Remove empty properties
+        if (self.property_name in self.node._data and len(self.node._data[self.property_name]) == 0):
             for child in self.node.children:
-                if (isinstance(child, KeyValueNode) and
-                    child.data == "expected"):
+                if (isinstance(child, KeyValueNode) and child.data == self.property_name):
                     child.remove()
-                    del self._data["expected"]
                     break
 
-        for subtest in self.subtests.itervalues():
-            subtest.clear_expected()
+    def update_default(self):
+        """Get the updated default value for the property (i.e. the one chosen when no conditions match).
 
-    def append(self, node):
-        child = ManifestItem.append(self, node)
-        self.subtests[child.name] = child
+        :returns: (update, new_default_value) where update is a bool indicating whether the property
+                  should be updated, and new_default_value is the value to set if it should."""
+        raise NotImplementedError
 
-    def get_subtest(self, name):
-        """Return a SubtestNode corresponding to a particular subtest of
-        the current test, creating a new one if no subtest with that name
-        already exists.
+    def add_new(self, unconditional_value, stability):
+        """Add new conditional values for the property.
 
-        :param name: Name of the subtest"""
+        Subclasses need not implement this if they only ever update the default value."""
+        raise NotImplementedError
 
-        if name in self.subtests:
-            return self.subtests[name]
-        else:
-            subtest = SubtestNode.create(name)
-            self.append(subtest)
-            return subtest
+    def update_value(self, old_value, new_value):
+        """Get a value to set on the property, given its previous value and the new value from logs.
 
+        By default this just returns the new value, but overriding is useful in cases
+        where we want the new value to be some function of both old and new e.g. max(old_value, new_value)"""
+        return new_value
 
-class SubtestNode(TestNode):
-    def __init__(self, node):
-        assert isinstance(node, DataNode)
-        TestNode.__init__(self, node)
 
-    @classmethod
-    def create(cls, name):
-        node = DataNode(name)
-        self = cls(node)
-        return self
+class ExpectedUpdate(PropertyUpdate):
+    property_name = "expected"
 
-    @property
-    def is_empty(self):
-        if self._data:
-            return False
-        return True
+    def check_default(self, result):
+        if self.default_value is not None:
+            assert self.default_value == result.default_expected
+        else:
+            self.default_value = result.default_expected
+
+    def get_value(self, in_value):
+        return in_value.status
+
+    def update_default(self):
+        update_default = all(self.new[0].value == result.value
+                             for result in self.new) and not self.updated
+        new_value = self.new[0].value
+        return update_default, new_value
+
+    def add_new(self, unconditional_value, stability):
+        try:
+            conditionals = group_conditionals(
+                self.new,
+                property_order=self.node.root.property_order,
+                boolean_properties=self.node.root.boolean_properties)
+        except ConditionError as e:
+            if stability is not None:
+                self.node.set("disabled", stability or "unstable", e.cond.children[0])
+                self.node.new_disabled = True
+            else:
+                raise UpdateError("Conflicting metadata values")
+        for conditional_node, value in conditionals:
+            if value != unconditional_value:
+                self.node.set(self.property_name, value, condition=conditional_node.children[0])
+
+
+class MaxAssertsUpdate(PropertyUpdate):
+    property_name = "max-asserts"
+    cls_default_value = 0
+    value_type = int
+
+    def update_value(self, old_value, new_value):
+        new_value = self.value_type(new_value)
+        if old_value is not None:
+            old_value = self.value_type(old_value)
+        if old_value is not None and old_value < new_value:
+            return new_value + 1
+        if old_value is None:
+            return new_value + 1
+        return old_value
+
+    def update_default(self):
+        """For asserts we always update the default value and never add new conditionals.
+        The value we set as the default is the maximum of the current default and one more than
+        the number of asserts we saw in any configuration."""
+        # Current values
+        values = []
+        current_default = None
+        if self.property_name in self.node._data:
+            current_default = [item for item in
+                               self.node._data[self.property_name]
+                               if item.condition_node is None]
+            if current_default:
+                values.append(int(current_default[0].value))
+        values.extend(item.value for item in self.new)
+        values.extend(item.value for item in
+                      itertools.chain.from_iterable(results for _, results in self.updated))
+        new_value = max(values)
+        return True, new_value
+
+
+class MinAssertsUpdate(PropertyUpdate):
+    property_name = "min-asserts"
+    cls_default_value = 0
+    value_type = int
+
+    def update_value(self, old_value, new_value):
+        new_value = self.value_type(new_value)
+        if old_value is not None:
+            old_value = self.value_type(old_value)
+        if old_value is not None and new_value < old_value:
+            return 0
+        if old_value is None:
+            # If we are getting some asserts for the first time, set the minimum to 0
+            return new_value
+        return old_value
+
+    def update_default(self):
+        """For asserts we always update the default value and never add new conditionals.
+        This is either set to the current value or one less than the number of asserts
+        we saw, whichever is lower."""
+        values = []
+        current_default = None
+        if self.property_name in self.node._data:
+            current_default = [item for item in
+                               self.node._data[self.property_name]
+                               if item.condition_node is None]
+        if current_default:
+            values.append(current_default[0].value_as(self.value_type))
+        values.extend(max(0, item.value) for item in self.new)
+        values.extend(max(0, item.value) for item in
+                      itertools.chain.from_iterable(results for _, results in self.updated))
+        new_value = min(values)
+        return True, new_value
+
+
+class LsanUpdate(PropertyUpdate):
+    property_name = "lsan-allowed"
+    cls_default_value = None
+
+    def get_value(self, result):
+        # If we have an allowed_match that matched, return None
+        # This value is ignored later (because it matches the default)
+        # We do that because then if we allow a failure in foo/__dir__.ini
+        # we don't want to update foo/bar/__dir__.ini with the same rule
+        if result[1]:
+            return None
+        # Otherwise return the topmost stack frame
+        # TODO: there is probably some improvement to be made by looking for a "better" stack frame
+        return result[0][0]
+
+    def update_value(self, old_value, new_value):
+        if isinstance(new_value, (str, unicode)):
+            new_value = {new_value}
+        else:
+            new_value = set(new_value)
+        if old_value is None:
+            old_value = set()
+        old_value = set(old_value)
+        return sorted((old_value | new_value) - {None})
+
+    def update_default(self):
+        current_default = None
+        if self.property_name in self.node._data:
+            current_default = [item for item in
+                               self.node._data[self.property_name]
+                               if item.condition_node is None]
+        if current_default:
+            current_default = current_default[0].value
+        new_values = [item.value for item in self.new]
+        new_value = self.update_value(current_default, new_values)
+        return True, new_value if new_value else None
 
 
 def group_conditionals(values, property_order=None, boolean_properties=None):
-    """Given a list of Result objects, return a list of
+    """Given a list of Value objects, return a list of
     (conditional_node, status) pairs representing the conditional
     expressions that are required to match each status
 
-    :param values: List of Results
+    :param values: List of Values
     :param property_order: List of properties to use in expectation metadata
                            from most to least significant.
     :param boolean_properties: Set of properties in property_order that should
                                be treated as boolean."""
 
     by_property = defaultdict(set)
-    for run_info, status in values:
+    for run_info, value in values:
         for prop_name, prop_value in run_info.iteritems():
-            by_property[(prop_name, prop_value)].add(status)
+            by_property[(prop_name, prop_value)].add(value)
 
     if property_order is None:
         property_order = ["debug", "os", "version", "processor", "bits"]
@@ -372,21 +581,21 @@ def group_conditionals(values, property_order=None, boolean_properties=None):
 
     conditions = {}
 
-    for run_info, status in values:
+    for run_info, value in values:
         prop_set = tuple((prop, run_info[prop]) for prop in include_props)
         if prop_set in conditions:
-            if conditions[prop_set][1] != status:
+            if conditions[prop_set][1] != value:
                 # A prop_set contains contradictory results
-                raise ConditionError(make_expr(prop_set, status, boolean_properties))
+                raise ConditionError(make_expr(prop_set, value, boolean_properties))
             continue
 
-        expr = make_expr(prop_set, status, boolean_properties=boolean_properties)
-        conditions[prop_set] = (expr, status)
+        expr = make_expr(prop_set, value, boolean_properties=boolean_properties)
+        conditions[prop_set] = (expr, value)
 
     return conditions.values()
 
 
-def make_expr(prop_set, status, boolean_properties=None):
+def make_expr(prop_set, rhs, boolean_properties=None):
     """Create an AST that returns the value ``status`` given all the
     properties in prop_set match.
 
@@ -434,7 +643,11 @@ def make_expr(prop_set, status, boolean_properties=None):
         node = expressions[0]
 
     root.append(node)
-    root.append(StringNode(status))
+    if type(rhs) in number_types:
+        rhs_node = NumberNode(rhs)
+    else:
+        rhs_node = StringNode(rhs)
+    root.append(rhs_node)
 
     return root
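
The PropertyUpdate hierarchy introduced above separates the bookkeeping in set()/coalesce() from per-key policy, so a new metadata key only needs to describe how its values combine. As a rough sketch (a hypothetical updater, not part of the import, assuming the PropertyUpdate base class defined above), a key tracking the largest observed integer value could be written as follows and registered in TestNode.update_properties alongside the assert updaters:

    class MaxDurationUpdate(PropertyUpdate):
        # Hypothetical example: keep the largest value seen for an
        # integer-valued metadata key.
        property_name = "max-duration"
        cls_default_value = 0
        value_type = int

        def update_value(self, old_value, new_value):
            # Keep whichever of the stored and newly observed values is larger.
            new_value = self.value_type(new_value)
            if old_value is None:
                return new_value
            return max(self.value_type(old_value), new_value)

        def update_default(self):
            # Always fold results into the unconditional default, as the
            # assert-count updaters above do; never add new conditionals.
            values = [item.value for item in self.new]
            values.extend(item.value for _, results in self.updated
                          for item in results)
            return True, max(values)
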
 
index 92d77ba..94560cd 100644 (file)
@@ -1,30 +1,27 @@
+import array
 import os
 import shutil
-import sys
 import tempfile
-import types
 import uuid
-from collections import defaultdict
+from collections import defaultdict, namedtuple
 
-from mozlog import reader
 from mozlog import structuredlog
 
-import expected
 import manifestupdate
 import testloader
 import wptmanifest
 import wpttest
+from expected import expected_path
 from vcs import git
 manifest = None  # Module that will be imported relative to test_root
 manifestitem = None
 
 logger = structuredlog.StructuredLogger("web-platform-tests")
 
-
-def load_test_manifests(serve_root, test_paths):
-    do_delayed_imports(serve_root)
-    manifest_loader = testloader.ManifestLoader(test_paths, False)
-    return manifest_loader.load()
+try:
+    import ujson as json
+except ImportError:
+    import json
 
 
 def update_expected(test_paths, serve_root, log_file_names,
@@ -36,42 +33,25 @@ def update_expected(test_paths, serve_root, log_file_names,
 
     If stability is not None, assume log_file_names refers to logs from repeated
     test jobs, disable tests that don't behave as expected on all runs"""
+    do_delayed_imports(serve_root)
 
-    manifests = load_test_manifests(serve_root, test_paths)
-
-    change_data = {}
-
-    if sync_root is not None:
-        if rev_old is not None:
-            rev_old = git("rev-parse", rev_old, repo=sync_root).strip()
-        rev_new = git("rev-parse", rev_new, repo=sync_root).strip()
-
-        if rev_old is not None:
-            change_data = load_change_data(rev_old, rev_new, repo=sync_root)
-
-    expected_map_by_manifest = update_from_logs(manifests,
-                                                *log_file_names,
-                                                ignore_existing=ignore_existing,
-                                                property_order=property_order,
-                                                boolean_properties=boolean_properties,
-                                                stability=stability)
-
-    for test_manifest, expected_map in expected_map_by_manifest.iteritems():
-        url_base = manifests[test_manifest]["url_base"]
-        metadata_path = test_paths[url_base]["metadata_path"]
-        write_changes(metadata_path, expected_map)
-        if stability is not None:
-            for tree in expected_map.itervalues():
-                for test in tree.iterchildren():
-                    for subtest in test.iterchildren():
-                        if subtest.new_disabled:
-                            print "disabled: %s" % os.path.dirname(subtest.root.test_path) + "/" + subtest.name
-                    if test.new_disabled:
-                        print "disabled: %s" % test.root.test_path
+    id_test_map = load_test_data(test_paths)
 
-    results_changed = [item.test_path for item in expected_map.itervalues() if item.modified]
+    for metadata_path, updated_ini in update_from_logs(id_test_map,
+                                                       *log_file_names,
+                                                       ignore_existing=ignore_existing,
+                                                       property_order=property_order,
+                                                       boolean_properties=boolean_properties,
+                                                       stability=stability):
 
-    return unexpected_changes(manifests, change_data, results_changed)
+        write_new_expected(metadata_path, updated_ini)
+        if stability:
+            for test in updated_ini.iterchildren():
+                for subtest in test.iterchildren():
+                    if subtest.new_disabled:
+                        print "disabled: %s" % os.path.dirname(subtest.root.test_path) + "/" + subtest.name
+                    if test.new_disabled:
+                        print "disabled: %s" % test.root.test_path
 
 
 def do_delayed_imports(serve_root):
@@ -119,8 +99,6 @@ def unexpected_changes(manifests, change_data, files_changed):
     else:
         return []
 
-    rv = []
-
     return [fn for _, fn, _ in root_manifest if fn in files_changed and change_data.get(fn) != "M"]
 
 # For each testrun
@@ -139,38 +117,107 @@ def unexpected_changes(manifests, change_data, files_changed):
 #   Check if all the RHS values are the same; if so collapse the conditionals
 
 
-def update_from_logs(manifests, *log_filenames, **kwargs):
+class InternedData(object):
+    """Class for interning data of any (hashable) type.
+
+    This class is intended for building a mapping of int <=> value, such
+    that the integer may be stored as a proxy for the real value, and then
+    the real value obtained later from the proxy value.
+
+    In order to support the use case of packing the integer value as binary,
+    it is possible to specify a maximum bitsize of the data; adding more items
+    than this allows will result in a ValueError exception.
+
+    The zero value is reserved for use as a sentinel."""
+
+    type_conv = None
+    rev_type_conv = None
+
+    def __init__(self, max_bits=8):
+        self.max_idx = 2**max_bits - 2
+        # Reserve 0 as a sentinel
+        self._data = [None], {}
+
+    def store(self, obj):
+        if self.type_conv is not None:
+            obj = self.type_conv(obj)
+
+        objs, obj_to_idx = self._data
+        if obj not in obj_to_idx:
+            value = len(objs)
+            objs.append(obj)
+            obj_to_idx[obj] = value
+            if value > self.max_idx:
+                raise ValueError
+        else:
+            value = obj_to_idx[obj]
+        return value
+
+    def get(self, idx):
+        obj = self._data[0][idx]
+        if self.rev_type_conv is not None:
+            obj = self.rev_type_conv(obj)
+        return obj
+
+
+class RunInfoInterned(InternedData):
+    def type_conv(self, value):
+        return tuple(value.items())
+
+    def rev_type_conv(self, value):
+        return dict(value)
+
+
+prop_intern = InternedData(4)
+run_info_intern = RunInfoInterned()
+status_intern = InternedData(4)
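
A quick usage sketch of the interning helpers defined above (the concrete index values are what the current implementation hands out, since 0 is the reserved sentinel and indices start at 1):

statuses = InternedData(max_bits=4)
fail_idx = statuses.store("FAIL")            # -> 1
assert statuses.store("FAIL") == fail_idx    # same value, same index
assert statuses.get(fail_idx) == "FAIL"

run_infos = RunInfoInterned()
idx = run_infos.store({"os": "linux", "debug": False})
assert run_infos.get(idx) == {"os": "linux", "debug": False}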
+
+
+def load_test_data(test_paths):
+    manifest_loader = testloader.ManifestLoader(test_paths, False)
+    manifests = manifest_loader.load()
+
+    id_test_map = {}
+    for test_manifest, paths in manifests.iteritems():
+        id_test_map.update(create_test_tree(paths["metadata_path"],
+                                            test_manifest))
+    return id_test_map
+
+
+def update_from_logs(id_test_map, *log_filenames, **kwargs):
     ignore_existing = kwargs.get("ignore_existing", False)
     property_order = kwargs.get("property_order")
     boolean_properties = kwargs.get("boolean_properties")
     stability = kwargs.get("stability")
 
-    expected_map = {}
-    id_test_map = {}
-
-    for test_manifest, paths in manifests.iteritems():
-        expected_map_manifest, id_path_map_manifest = create_test_tree(
-            paths["metadata_path"],
-            test_manifest,
-            property_order=property_order,
-            boolean_properties=boolean_properties)
-        expected_map[test_manifest] = expected_map_manifest
-        id_test_map.update(id_path_map_manifest)
-
-    updater = ExpectedUpdater(manifests, expected_map, id_test_map,
+    updater = ExpectedUpdater(id_test_map,
                               ignore_existing=ignore_existing)
-    for log_filename in log_filenames:
+
+    for i, log_filename in enumerate(log_filenames):
+        print("Processing log %d/%d" % (i + 1, len(log_filenames)))
         with open(log_filename) as f:
             updater.update_from_log(f)
 
-    for manifest_expected in expected_map.itervalues():
-        for tree in manifest_expected.itervalues():
-            for test in tree.iterchildren():
-                for subtest in test.iterchildren():
-                    subtest.coalesce_expected(stability=stability)
-                test.coalesce_expected(stability=stability)
+    for item in update_results(id_test_map, property_order, boolean_properties, stability):
+        yield item
+
+
+def update_results(id_test_map, property_order, boolean_properties, stability):
+    test_file_items = set(id_test_map.itervalues())
+
+    default_expected_by_type = {}
+    for test_type, test_cls in wpttest.manifest_test_cls.iteritems():
+        if test_cls.result_cls:
+            default_expected_by_type[(test_type, False)] = test_cls.result_cls.default_expected
+        if test_cls.subtest_result_cls:
+            default_expected_by_type[(test_type, True)] = test_cls.subtest_result_cls.default_expected
+
+    for test_file in test_file_items:
+        updated_expected = test_file.update(property_order, boolean_properties, stability,
+                                            default_expected_by_type)
+        if updated_expected is not None and updated_expected.modified:
+            yield test_file.metadata_path, updated_expected
 
-    return expected_map
 
 def directory_manifests(metadata_path):
     rv = []
@@ -180,19 +227,16 @@ def directory_manifests(metadata_path):
             rv.append(os.path.join(rel_path, "__dir__.ini"))
     return rv
 
-def write_changes(metadata_path, expected_map):
+
+def write_changes(metadata_path, expected):
     # First write the new manifest files to a temporary directory
     temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
-    write_new_expected(temp_path, expected_map)
-
-    # Keep all __dir__.ini files (these are not in expected_map because they
-    # aren't associated with a specific test)
-    keep_files = directory_manifests(metadata_path)
+    write_new_expected(temp_path, expected)
 
     # Copy all files in the root to the temporary location since
     # these cannot be ini files
-    keep_files.extend(item for item in os.listdir(metadata_path) if
-                      not os.path.isdir(os.path.join(metadata_path, item)))
+    keep_files = [item for item in os.listdir(metadata_path) if
+                  not os.path.isdir(os.path.join(metadata_path, item))]
 
     for item in keep_files:
         dest_dir = os.path.dirname(os.path.join(temp_path, item))
@@ -209,158 +253,356 @@ def write_changes(metadata_path, expected_map):
     shutil.rmtree(temp_path_2)
 
 
-def write_new_expected(metadata_path, expected_map):
+def write_new_expected(metadata_path, expected):
     # Serialize the data back to a file
-    for tree in expected_map.itervalues():
-        if not tree.is_empty:
-            manifest_str = wptmanifest.serialize(tree.node, skip_empty_data=True)
-            assert manifest_str != ""
-            path = expected.expected_path(metadata_path, tree.test_path)
-            dir = os.path.split(path)[0]
-            if not os.path.exists(dir):
-                os.makedirs(dir)
-            with open(path, "wb") as f:
+    path = expected_path(metadata_path, expected.test_path)
+    if not expected.is_empty:
+        manifest_str = wptmanifest.serialize(expected.node, skip_empty_data=True)
+        assert manifest_str != ""
+        dir = os.path.split(path)[0]
+        if not os.path.exists(dir):
+            os.makedirs(dir)
+        tmp_path = path + ".tmp"
+        try:
+            with open(tmp_path, "wb") as f:
                 f.write(manifest_str)
+            os.rename(tmp_path, path)
+        except (Exception, KeyboardInterrupt):
+            try:
+                os.unlink(tmp_path)
+            except OSError:
+                pass
+    else:
+        try:
+            os.unlink(path)
+        except OSError:
+            pass
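
write_new_expected above now writes to a temporary file and renames it into place, so a partially written .ini can never replace a good one. A generic sketch of that pattern follows (the helper name is illustrative; unlike the imported code, this sketch re-raises after cleaning up the temporary file):

import os

def atomic_write(path, data):
    tmp_path = path + ".tmp"
    try:
        with open(tmp_path, "wb") as f:
            f.write(data)
        # On POSIX, rename is atomic when source and target are on the
        # same filesystem, so readers see either the old or the new file.
        os.rename(tmp_path, path)
    except Exception:
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        raise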
 
 
 class ExpectedUpdater(object):
-    def __init__(self, test_manifests, expected_tree, id_path_map, ignore_existing=False):
-        self.test_manifests = test_manifests
-        self.expected_tree = expected_tree
-        self.id_path_map = id_path_map
+    def __init__(self, id_test_map, ignore_existing=False):
+        self.id_test_map = id_test_map
         self.ignore_existing = ignore_existing
         self.run_info = None
         self.action_map = {"suite_start": self.suite_start,
                            "test_start": self.test_start,
                            "test_status": self.test_status,
-                           "test_end": self.test_end}
+                           "test_end": self.test_end,
+                           "assertion_count": self.assertion_count,
+                           "lsan_leak": self.lsan_leak}
         self.tests_visited = {}
 
-        self.test_cache = {}
-
     def update_from_log(self, log_file):
         self.run_info = None
-        log_reader = reader.read(log_file)
-        reader.each_log(log_reader, self.action_map)
+        try:
+            data = json.load(log_file)
+        except Exception:
+            pass
+        else:
+            if "action" not in data and "results" in data:
+                self.update_from_wptreport_log(data)
+                return
+
+        log_file.seek(0)
+        self.update_from_raw_log(log_file)
+
+    def update_from_raw_log(self, log_file):
+        action_map = self.action_map
+        for line in log_file:
+            try:
+                data = json.loads(line)
+            except ValueError:
+                # Just skip lines that aren't json
+                continue
+            action = data["action"]
+            if action in action_map:
+                action_map[action](data)
+
+    def update_from_wptreport_log(self, data):
+        action_map = self.action_map
+        action_map["suite_start"]({"run_info": data["run_info"]})
+        for test in data["results"]:
+            action_map["test_start"]({"test": test["test"]})
+            for subtest in test["subtests"]:
+                action_map["test_status"]({"test": test["test"],
+                                           "subtest": subtest["name"],
+                                           "status": subtest["status"],
+                                           "expected": subtest.get("expected")})
+            action_map["test_end"]({"test": test["test"],
+                                    "status": test["status"],
+                                    "expected": test.get("expected")})
+            if "asserts" in test:
+                asserts = test["asserts"]
+                action_map["assertion_count"]({"test": test["test"],
+                                               "count": asserts["count"],
+                                               "min_expected": asserts["min"],
+                                               "max_expected": asserts["max"]})
+        for item in data.get("lsan_leaks", []):
+            action_map["lsan_leak"](item)
 
     def suite_start(self, data):
-        self.run_info = data["run_info"]
-
-    def test_type(self, path):
-        for manifest in self.test_manifests.iterkeys():
-            tests = list(manifest.iterpath(path))
-            if len(tests):
-                assert all(test.item_type == tests[0].item_type for test in tests)
-                return tests[0].item_type
-        assert False
+        self.run_info = run_info_intern.store(data["run_info"])
 
     def test_start(self, data):
-        test_id = data["test"]
+        test_id = intern(data["test"].encode("utf8"))
         try:
-            test_manifest, test = self.id_path_map[test_id]
-            expected_node = self.expected_tree[test_manifest][test].get_test(test_id)
+            test_data = self.id_test_map[test_id]
         except KeyError:
             print "Test not found %s, skipping" % test_id
             return
-        self.test_cache[test_id] = expected_node
 
-        if test_id not in self.tests_visited:
-            if self.ignore_existing:
-                expected_node.clear_expected()
-            self.tests_visited[test_id] = set()
+        if self.ignore_existing:
+            test_data.set_requires_update()
+            test_data.clear.add("expected")
+        self.tests_visited[test_id] = set()
 
     def test_status(self, data):
-        test = self.test_cache.get(data["test"])
-        if test is None:
+        test_id = intern(data["test"].encode("utf8"))
+        subtest = intern(data["subtest"].encode("utf8"))
+        test_data = self.id_test_map.get(test_id)
+        if test_data is None:
             return
-        test_cls = wpttest.manifest_test_cls[self.test_type(test.root.test_path)]
 
-        subtest = test.get_subtest(data["subtest"])
+        self.tests_visited[test_id].add(subtest)
 
-        self.tests_visited[test.id].add(data["subtest"])
+        result = status_intern.store(data["status"])
 
-        result = test_cls.subtest_result_cls(
-            data["subtest"],
-            data["status"],
-            data.get("message"))
-
-        subtest.set_result(self.run_info, result)
+        test_data.set(test_id, subtest, "status", self.run_info, result)
+        if data.get("expected") and data["expected"] != data["status"]:
+            test_data.set_requires_update()
 
     def test_end(self, data):
-        test_id = data["test"]
-        test = self.test_cache.get(test_id)
-        if test is None:
+        if data["status"] == "SKIP":
             return
-        test_cls = wpttest.manifest_test_cls[self.test_type(test.root.test_path)]
 
-        if data["status"] == "SKIP":
+        test_id = intern(data["test"].encode("utf8"))
+        test_data = self.id_test_map.get(test_id)
+        if test_data is None:
             return
 
-        result = test_cls.result_cls(
-            data["status"],
-            data.get("message"))
-        test.set_result(self.run_info, result)
-        del self.test_cache[test_id]
+        result = status_intern.store(data["status"])
+
+        test_data.set(test_id, None, "status", self.run_info, result)
+        if data.get("expected") and data["status"] != data["expected"]:
+            test_data.set_requires_update()
+        del self.tests_visited[test_id]
 
+    def assertion_count(self, data):
+        test_id = intern(data["test"].encode("utf8"))
+        test_data = self.id_test_map.get(test_id)
+        if test_data is None:
+            return
 
-def create_test_tree(metadata_path, test_manifest, property_order=None,
-                     boolean_properties=None):
-    expected_map = {}
+        test_data.set(test_id, None, "asserts", self.run_info, data["count"])
+        if data["count"] < data["min_expected"] or data["count"] > data["max_expected"]:
+            test_data.set_requires_update()
+
+    def lsan_leak(self, data):
+        dir_path = data.get("scope", "/")
+        dir_id = intern(os.path.join(dir_path, "__dir__").replace(os.path.sep, "/").encode("utf8"))
+        if dir_id.startswith("/"):
+            dir_id = dir_id[1:]
+        test_data = self.id_test_map[dir_id]
+        test_data.set(dir_id, None, "lsan",
+                      self.run_info, (data["frames"], data.get("allowed_match")))
+        if not data.get("allowed_match"):
+            test_data.set_requires_update()
+
+
+def create_test_tree(metadata_path, test_manifest):
+    """Create a map of test_id to TestFileData for that test.
+    """
     id_test_map = {}
     exclude_types = frozenset(["stub", "helper", "manual", "support", "conformancechecker"])
-    all_types = [item.item_type for item in manifestitem.__dict__.itervalues()
-                 if type(item) == type and
-                 issubclass(item, manifestitem.ManifestItem) and
-                 item.item_type is not None]
+    all_types = manifestitem.item_types.keys()
     include_types = set(all_types) - exclude_types
-    for _, test_path, tests in test_manifest.itertypes(*include_types):
-        expected_data = load_expected(test_manifest, metadata_path, test_path, tests,
-                                      property_order=property_order,
-                                      boolean_properties=boolean_properties)
-        if expected_data is None:
-            expected_data = create_expected(test_manifest,
-                                            test_path,
-                                            tests,
-                                            property_order=property_order,
-                                            boolean_properties=boolean_properties)
-
+    for item_type, test_path, tests in test_manifest.itertypes(*include_types):
+        test_file_data = TestFileData(intern(test_manifest.url_base.encode("utf8")),
+                                      intern(item_type.encode("utf8")),
+                                      metadata_path,
+                                      test_path,
+                                      tests)
         for test in tests:
-            id_test_map[test.id] = (test_manifest, test)
-            expected_map[test] = expected_data
-
-    return expected_map, id_test_map
-
+            id_test_map[intern(test.id.encode("utf8"))] = test_file_data
+
+        dir_path = os.path.split(test_path)[0].replace(os.path.sep, "/")
+        while True:
+            if dir_path:
+                dir_id = dir_path + "/__dir__"
+            else:
+                dir_id = "__dir__"
+            dir_id = intern((test_manifest.url_base + dir_id).lstrip("/").encode("utf8"))
+            if dir_id not in id_test_map:
+                test_file_data = TestFileData(intern(test_manifest.url_base.encode("utf8")),
+                                              None,
+                                              metadata_path,
+                                              dir_id,
+                                              [])
+                id_test_map[dir_id] = test_file_data
+            if not dir_path or dir_path in id_test_map:
+                break
+            dir_path = dir_path.rsplit("/", 1)[0] if "/" in dir_path else ""
+
+    return id_test_map
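
The loop above also registers a placeholder TestFileData for every ancestor directory, so per-directory data such as lsan results has somewhere to land. A simplified sketch of how those __dir__ ids are derived from a test path (assuming a url_base of "/" and ignoring the early exit once an ancestor is already registered):

import posixpath

def dir_ids(test_path, url_base="/"):
    ids = []
    dir_path = posixpath.dirname(test_path)
    while True:
        dir_id = dir_path + "/__dir__" if dir_path else "__dir__"
        ids.append((url_base + dir_id).lstrip("/"))
        if not dir_path:
            break
        dir_path = posixpath.dirname(dir_path)
    return ids

# dir_ids("css/css-grid/test.html")
#   -> ["css/css-grid/__dir__", "css/__dir__", "__dir__"]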
+
+
+class PackedResultList(object):
+    """Class for storing test results.
+
+    Results are stored as an array of 2-byte integers for compactness.
+    The first 4 bits represent the property name, the second 4 bits
+    represent the test status (if it's a result with a status code), and
+    the final 8 bits represent the run_info. If the result doesn't have a
+    simple status code but instead a richer type, we place that richer type
+    in a dictionary and set the status part of the result type to 0.
+
+    This class depends on the global prop_intern, run_info_intern and
+    status_intern InternedData objects to convert between the bit values
+    and corresponding Python objects."""
+
+    def __init__(self):
+        self.data = array.array("H")
+
+    __slots__ = ("data", "raw_data")
+
+    def append(self, prop, run_info, value):
+        out_val = (prop << 12) + run_info
+        if prop == prop_intern.store("status"):
+            out_val += value << 8
+        else:
+            if not hasattr(self, "raw_data"):
+                self.raw_data = {}
+            self.raw_data[len(self.data)] = value
+        self.data.append(out_val)
+
+    def unpack(self, idx, packed):
+        prop = prop_intern.get((packed & 0xF000) >> 12)
+
+        value_idx = (packed & 0x0F00) >> 8
+        if value_idx == 0:
+            value = self.raw_data[idx]
+        else:
+            value = status_intern.get(value_idx)
+
+        run_info = run_info_intern.get((packed & 0x00FF))
+
+        return prop, run_info, value
+
+    def __iter__(self):
+        for i, item in enumerate(self.data):
+            yield self.unpack(i, item)
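
A worked sketch of the 16-bit layout described in the PackedResultList docstring: 4 bits of property index, 4 bits of status index (0 when the value is kept in raw_data instead), and 8 bits of run_info index. The index values below are arbitrary choices for illustration, not real interned ids.

def pack(prop_idx, status_idx, run_info_idx):
    assert prop_idx < 16 and status_idx < 16 and run_info_idx < 256
    return (prop_idx << 12) | (status_idx << 8) | run_info_idx

def unpack(packed):
    return ((packed & 0xF000) >> 12,   # property
            (packed & 0x0F00) >> 8,    # status (0 => look in raw_data)
            packed & 0x00FF)           # run_info

assert pack(1, 2, 3) == 0x1203
assert unpack(0x1203) == (1, 2, 3)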
+
+
+class TestFileData(object):
+    __slots__ = ("url_base", "item_type", "test_path", "metadata_path", "tests",
+                 "_requires_update", "clear", "data")
+
+    def __init__(self, url_base, item_type, metadata_path, test_path, tests):
+        self.url_base = url_base
+        self.item_type = item_type
+        self.test_path = test_path
+        self.metadata_path = metadata_path
+        self.tests = {intern(item.id.encode("utf8")) for item in tests}
+        self._requires_update = False
+        self.clear = set()
+        self.data = defaultdict(lambda: defaultdict(PackedResultList))
+
+    def set_requires_update(self):
+        self._requires_update = True
+
+    def set(self, test_id, subtest_id, prop, run_info, value):
+        self.data[test_id][subtest_id].append(prop_intern.store(prop),
+                                              run_info,
+                                              value)
+
+    def expected(self, property_order, boolean_properties):
+        expected_data = load_expected(self.url_base,
+                                      self.metadata_path,
+                                      self.test_path,
+                                      self.tests,
+                                      property_order,
+                                      boolean_properties)
+        if expected_data is None:
+            expected_data = create_expected(self.url_base,
+                                            self.test_path,
+                                            property_order,
+                                            boolean_properties)
+        return expected_data
+
+    def update(self, property_order, boolean_properties, stability,
+               default_expected_by_type):
+        if not self._requires_update:
+            return
 
-def create_expected(test_manifest, test_path, tests, property_order=None,
+        expected = self.expected(property_order, boolean_properties)
+        expected_by_test = {}
+
+        for test_id in self.tests:
+            if not expected.has_test(test_id):
+                expected.append(manifestupdate.TestNode.create(test_id))
+            test_expected = expected.get_test(test_id)
+            expected_by_test[test_id] = test_expected
+            for prop in self.clear:
+                test_expected.clear(prop)
+
+        for test_id, test_data in self.data.iteritems():
+            for subtest_id, results_list in test_data.iteritems():
+                for prop, run_info, value in results_list:
+                    # Special case directory metadata
+                    if subtest_id is None and test_id.endswith("__dir__"):
+                        if prop == "lsan":
+                            expected.set_lsan(run_info, value)
+                        continue
+
+                    if prop == "status":
+                        value = Result(value, default_expected_by_type[self.item_type,
+                                                                       subtest_id is not None])
+
+                    test_expected = expected_by_test[test_id]
+                    if subtest_id is None:
+                        item_expected = test_expected
+                    else:
+                        item_expected = test_expected.get_subtest(subtest_id)
+                    if prop == "status":
+                        item_expected.set_result(run_info, value)
+                    elif prop == "asserts":
+                        item_expected.set_asserts(run_info, value)
+
+        expected.coalesce_properties(stability=stability)
+        for test in expected.iterchildren():
+            for subtest in test.iterchildren():
+                subtest.coalesce_properties(stability=stability)
+            test.coalesce_properties(stability=stability)
+
+        return expected
+
+
+Result = namedtuple("Result", ["status", "default_expected"])
+
+
+def create_expected(url_base, test_path, property_order=None,
                     boolean_properties=None):
-    expected = manifestupdate.ExpectedManifest(None, test_path, test_manifest.url_base,
+    expected = manifestupdate.ExpectedManifest(None,
+                                               test_path,
+                                               url_base,
                                                property_order=property_order,
                                                boolean_properties=boolean_properties)
-    for test in tests:
-        expected.append(manifestupdate.TestNode.create(test.id))
     return expected
 
 
-def load_expected(test_manifest, metadata_path, test_path, tests, property_order=None,
+def load_expected(url_base, metadata_path, test_path, tests, property_order=None,
                   boolean_properties=None):
     expected_manifest = manifestupdate.get_manifest(metadata_path,
                                                     test_path,
-                                                    test_manifest.url_base,
+                                                    url_base,
                                                     property_order=property_order,
                                                     boolean_properties=boolean_properties)
     if expected_manifest is None:
         return
 
-    tests_by_id = {item.id: item for item in tests}
-
     # Remove expected data for tests that no longer exist
     for test in expected_manifest.iterchildren():
-        if test.id not in tests_by_id:
+        if test.id not in tests:
             test.remove()
 
-    # Add tests that don't have expected data
-    for test in tests:
-        if not expected_manifest.has_test(test.id):
-            expected_manifest.append(manifestupdate.TestNode.create(test.id))
-
     return expected_manifest
index c077f95..0c1f86f 100644 (file)
@@ -1,4 +1,3 @@
-import os
 import importlib
 import imp
 
@@ -12,9 +11,6 @@ def products_enabled(config):
         return names
 
 def product_module(config, product):
-    here = os.path.join(os.path.split(__file__)[0])
-    product_dir = os.path.join(here, "browsers")
-
     if product not in products_enabled(config):
         raise ValueError("Unknown product %s" % product)
 
index e1709c5..859f014 100644 (file)
@@ -181,8 +181,8 @@ def run_step(logger, iterations, restart_after_iteration, kwargs_extras, **kwarg
     kwargs.update(kwargs_extras)
 
     def wrap_handler(x):
-        x = LogLevelFilter(x, "WARNING")
         if not kwargs["verify_log_full"]:
+            x = LogLevelFilter(x, "WARNING")
             x = LogActionFilter(x, ["log", "process_output"])
         return x
 
index 9ac2e94..018dc10 100644 (file)
@@ -378,11 +378,13 @@ class TagFilter(object):
 
 
 class ManifestLoader(object):
-    def __init__(self, test_paths, force_manifest_update=False, manifest_download=False):
+    def __init__(self, test_paths, force_manifest_update=False, manifest_download=False, types=None, meta_filters=None):
         do_delayed_imports()
         self.test_paths = test_paths
         self.force_manifest_update = force_manifest_update
         self.manifest_download = manifest_download
+        self.types = types
+        self.meta_filters = meta_filters or []
         self.logger = structured.get_default_logger()
         if self.logger is None:
             self.logger = structured.structuredlog.StructuredLogger("ManifestLoader")
@@ -416,10 +418,12 @@ class ManifestLoader(object):
                 with open(manifest_path) as f:
                     json_data = json.load(f)
             except IOError:
-                #If the existing file doesn't exist just create one from scratch
-                pass
+                self.logger.info("Unable to find test manifest")
+            except ValueError:
+                self.logger.info("Unable to parse test manifest")
 
         if not json_data:
+            self.logger.info("Creating test manifest")
             manifest_file = manifest.Manifest(url_base)
         else:
             try:
@@ -435,7 +439,7 @@ class ManifestLoader(object):
         if (not os.path.exists(manifest_path) or
             self.force_manifest_update):
             self.update_manifest(manifest_path, tests_path, url_base, download=self.manifest_download)
-        manifest_file = manifest.load(tests_path, manifest_path)
+        manifest_file = manifest.load(tests_path, manifest_path, types=self.types, meta_filters=self.meta_filters)
         if manifest_file.url_base != url_base:
             self.logger.info("Updating url_base in manifest from %s to %s" % (manifest_file.url_base,
                                                                               url_base))
@@ -451,6 +455,7 @@ def iterfilter(filters, iter):
     for item in iter:
         yield item
 
+
 class TestLoader(object):
     def __init__(self,
                  test_manifests,
@@ -511,7 +516,7 @@ class TestLoader(object):
     def load_dir_metadata(self, test_manifest, metadata_path, test_path):
         rv = []
         path_parts = os.path.dirname(test_path).split(os.path.sep)
-        for i in xrange(1,len(path_parts) + 1):
+        for i in xrange(len(path_parts) + 1):
             path = os.path.join(metadata_path, os.path.sep.join(path_parts[:i]), "__dir__.ini")
             if path not in self.directory_manifests:
                 self.directory_manifests[path] = manifestexpected.get_dir_manifest(path,
@@ -593,6 +598,10 @@ class TestSource(object):
     def make_queue(cls, tests, **kwargs):
         pass
 
+    @classmethod
+    def group_metadata(cls, state):
+        return {"scope": "/"}
+
     def group(self):
         if not self.current_group or len(self.current_group) == 0:
             try:
@@ -616,7 +625,8 @@ class GroupedSource(TestSource):
 
         for test in tests:
             if cls.new_group(state, test, **kwargs):
-                groups.append((deque(), {}))
+                group_metadata = cls.group_metadata(state)
+                groups.append((deque(), group_metadata))
 
             group, metadata = groups[-1]
             group.append(test)
@@ -633,7 +643,7 @@ class SingleTestSource(TestSource):
         test_queue = Queue()
         processes = kwargs["processes"]
         queues = [deque([]) for _ in xrange(processes)]
-        metadatas = [{} for _ in xrange(processes)]
+        metadatas = [cls.group_metadata(None) for _ in xrange(processes)]
         for test in tests:
             idx = hash(test.id) % processes
             group = queues[idx]
@@ -651,9 +661,13 @@ class PathGroupedSource(GroupedSource):
     @classmethod
     def new_group(cls, state, test, **kwargs):
         depth = kwargs.get("depth")
-        if depth is True:
+        if depth is True or depth == 0:
             depth = None
         path = urlparse.urlsplit(test.url).path.split("/")[1:-1][:depth]
         rv = path != state.get("prev_path")
         state["prev_path"] = path
         return rv
+
+    @classmethod
+    def group_metadata(cls, state):
+        return {"scope": "/%s" % "/".join(state["prev_path"])}
index 257e789..46d67e3 100644 (file)
@@ -1,7 +1,6 @@
 from __future__ import unicode_literals
 
 import multiprocessing
-import sys
 import threading
 import traceback
 from Queue import Empty
@@ -175,7 +174,7 @@ class BrowserManager(object):
         self.last_test = test
         return restart_required
 
-    def init(self):
+    def init(self, group_metadata):
         """Launch the browser that is being tested,
         and the TestRunner process that will run the tests."""
         # It seems that this lock is helpful to prevent some race that otherwise
@@ -193,7 +192,7 @@ class BrowserManager(object):
             if self.init_timer is not None:
                 self.init_timer.start()
             self.logger.debug("Starting browser with settings %r" % self.browser_settings)
-            self.browser.start(**self.browser_settings)
+            self.browser.start(group_metadata=group_metadata, **self.browser_settings)
             self.browser_pid = self.browser.pid()
         except Exception:
             self.logger.warning("Failure during init %s" % traceback.format_exc())
@@ -452,7 +451,7 @@ class TestRunnerManager(threading.Thread):
 
         self.browser.update_settings(self.state.test)
 
-        result = self.browser.init()
+        result = self.browser.init(self.state.group_metadata)
         if result is Stop:
             return RunnerManagerState.error()
         elif not result:
@@ -583,11 +582,20 @@ class TestRunnerManager(threading.Thread):
         if status == "CRASH":
             self.browser.log_crash(test.id)
 
+        if "assertion_count" in file_result.extra:
+            assertion_count = file_result.extra.pop("assertion_count")
+            if assertion_count > 0:
+                self.logger.assertion_count(test.id,
+                                            int(assertion_count),
+                                            test.min_assertion_count,
+                                            test.max_assertion_count)
+
         self.logger.test_end(test.id,
                              status,
                              message=file_result.message,
                              expected=expected,
-                             extra=file_result.extra)
+                             extra=file_result.extra,
+                             stack=file_result.stack)
 
         restart_before_next = (test.restart_after or
                                file_result.status in ("CRASH", "EXTERNAL-TIMEOUT", "INTERNAL-ERROR") or
index db0de8f..e854295 100644 (file)
@@ -8,7 +8,7 @@ sys.path.insert(0, join(dirname(__file__), "..", "..", ".."))
 
 sauce = pytest.importorskip("wptrunner.browsers.sauce")
 
-from wptserve.config import Config
+from wptserve.config import ConfigBuilder
 
 
 def test_sauceconnect_success():
@@ -27,10 +27,10 @@ def test_sauceconnect_success():
             sauce_tunnel_id="ccc",
             sauce_connect_binary="ddd")
 
-        env_config = Config(browser_host="example.net")
-        sauce_connect(None, env_config)
-        with sauce_connect:
-            pass
+        with ConfigBuilder(browser_host="example.net") as env_config:
+            sauce_connect(None, env_config)
+            with sauce_connect:
+                pass
 
 
 @pytest.mark.parametrize("readyfile,returncode", [
@@ -56,16 +56,43 @@ def test_sauceconnect_failure_exit(readyfile, returncode):
             sauce_tunnel_id="ccc",
             sauce_connect_binary="ddd")
 
-        env_config = Config(browser_host="example.net")
-        sauce_connect(None, env_config)
-        with pytest.raises(sauce.SauceException):
-            with sauce_connect:
-                pass
+        with ConfigBuilder(browser_host="example.net") as env_config:
+            sauce_connect(None, env_config)
+            with pytest.raises(sauce.SauceException):
+                with sauce_connect:
+                    pass
 
         # Given we appear to exit immediately with these mocks, sleep shouldn't be called
         sleep.assert_not_called()
 
 
+def test_sauceconnect_cleanup():
+    """Ensure that execution pauses when the process is closed while exiting
+    the context manager. This allows Sauce Connect to close any active
+    tunnels."""
+    with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
+            mock.patch.object(sauce.subprocess, "Popen") as Popen,\
+            mock.patch.object(sauce.os.path, "exists") as exists,\
+            mock.patch.object(sauce.time, "sleep") as sleep:
+        Popen.return_value.poll.return_value = True
+        Popen.return_value.returncode = None
+        exists.return_value = True
+
+        sauce_connect = sauce.SauceConnect(
+            sauce_user="aaa",
+            sauce_key="bbb",
+            sauce_tunnel_id="ccc",
+            sauce_connect_binary="ddd")
+
+        with ConfigBuilder(browser_host="example.net") as env_config:
+            sauce_connect(None, env_config)
+            with sauce_connect:
+                Popen.return_value.poll.return_value = None
+                sleep.assert_not_called()
+
+        sleep.assert_called()
+
+
 def test_sauceconnect_failure_never_ready():
     with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
             mock.patch.object(sauce.subprocess, "Popen") as Popen,\
@@ -81,11 +108,11 @@ def test_sauceconnect_failure_never_ready():
             sauce_tunnel_id="ccc",
             sauce_connect_binary="ddd")
 
-        env_config = Config(browser_host="example.net")
-        sauce_connect(None, env_config)
-        with pytest.raises(sauce.SauceException):
-            with sauce_connect:
-                pass
+        with ConfigBuilder(browser_host="example.net") as env_config:
+            sauce_connect(None, env_config)
+            with pytest.raises(sauce.SauceException):
+                with sauce_connect:
+                    pass
 
         # We should sleep while waiting for it to create the readyfile
         sleep.assert_called()
@@ -109,24 +136,24 @@ def test_sauceconnect_tunnel_domains():
             sauce_tunnel_id="ccc",
             sauce_connect_binary="ddd")
 
-        env_config = Config(browser_host="example.net",
-                            alternate_hosts={"alt": "example.org"},
-                            subdomains={"a", "b"},
-                            not_subdomains={"x", "y"})
-        sauce_connect(None, env_config)
-        with sauce_connect:
-            Popen.assert_called_once()
-            args, kwargs = Popen.call_args
-            cmd = args[0]
-            assert "--tunnel-domains" in cmd
-            i = cmd.index("--tunnel-domains")
-            rest = cmd[i+1:]
-            assert len(rest) >= 1
-            if len(rest) > 1:
-                assert rest[1].startswith("-"), "--tunnel-domains takes a comma separated list (not a space separated list)"
-            assert set(rest[0].split(",")) == {'example.net',
-                                               'a.example.net',
-                                               'b.example.net',
-                                               'example.org',
-                                               'a.example.org',
-                                               'b.example.org'}
+        with ConfigBuilder(browser_host="example.net",
+                           alternate_hosts={"alt": "example.org"},
+                           subdomains={"a", "b"},
+                           not_subdomains={"x", "y"}) as env_config:
+            sauce_connect(None, env_config)
+            with sauce_connect:
+                Popen.assert_called_once()
+                args, kwargs = Popen.call_args
+                cmd = args[0]
+                assert "--tunnel-domains" in cmd
+                i = cmd.index("--tunnel-domains")
+                rest = cmd[i+1:]
+                assert len(rest) >= 1
+                if len(rest) > 1:
+                    assert rest[1].startswith("-"), "--tunnel-domains takes a comma separated list (not a space separated list)"
+                assert set(rest[0].split(",")) == {'example.net',
+                                                   'a.example.net',
+                                                   'b.example.net',
+                                                   'example.org',
+                                                   'a.example.org',
+                                                   'b.example.org'}
index 062b687..bd649d6 100644 (file)
@@ -3,30 +3,37 @@ import sys
 from os.path import join, dirname
 from mozlog import structured
 
-import pytest
-
-sys.path.insert(0, join(dirname(__file__), "..", ".."))
+sys.path.insert(0, join(dirname(__file__), "..", "..", ".."))
 
 from wptrunner.testloader import EqualTimeChunker
+from manifest.sourcefile import SourceFile
 
 structured.set_default_logger(structured.structuredlog.StructuredLogger("TestChunker"))
 
+
+testharness_test = """<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>"""
+
+
 class MockTest(object):
     default_timeout = 10
 
-    def __init__(self, id, timeout=10):
+    def __init__(self, id, path, timeout=10, contents=testharness_test):
         self.id = id
+        self.url = "/" + path
         self.item_type = "testharness"
         self.timeout = timeout
+        self.source_file = SourceFile("/", path, "/", contents=contents)
 
 
 def make_mock_manifest(*items):
     rv = []
     for test_type, dir_path, num_tests in items:
         for i in range(num_tests):
+            filename = "/%i.html" % i
             rv.append((test_type,
-                       dir_path + "/%i.test" % i,
-                       set([MockTest(i)])))
+                       dir_path + filename,
+                       set([MockTest("%i.html" % i, dir_path + filename)])))
     return rv
 
 
diff --git a/WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_formatters.py b/WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_formatters.py
new file mode 100644 (file)
index 0000000..fc2ce5b
--- /dev/null
@@ -0,0 +1,64 @@
+import json
+import sys
+import time
+from os.path import dirname, join
+from StringIO import StringIO
+
+from mozlog import handlers, structuredlog
+
+sys.path.insert(0, join(dirname(__file__), "..", ".."))
+
+from wptrunner.formatters import WptreportFormatter
+
+
+def test_wptreport_runtime(capfd):
+    # setup the logger
+    output = StringIO()
+    logger = structuredlog.StructuredLogger("test_a")
+    logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))
+
+    # output a bunch of stuff
+    logger.suite_start(["test-id-1"], run_info={})
+    logger.test_start("test-id-1")
+    time.sleep(0.125)
+    logger.test_end("test-id-1", "PASS")
+    logger.suite_end()
+
+    # check nothing got output to stdout/stderr
+    # (note that mozlog outputs exceptions during handling to stderr!)
+    captured = capfd.readouterr()
+    assert captured.out == ""
+    assert captured.err == ""
+
+    # check the actual output of the formatter
+    output.seek(0)
+    output_obj = json.load(output)
+    # be relatively lax in case of low resolution timers
+    # 62 is 0.125s = 125ms / 2 = 62ms (assuming int maths)
+    # this provides a margin of 62ms, sufficient for even DOS (55ms timer)
+    assert output_obj["results"][0]["duration"] >= 62
+
+
+def test_wptreport_run_info_optional(capfd):
+    """per the mozlog docs, run_info is optional; check we work without it"""
+    # setup the logger
+    output = StringIO()
+    logger = structuredlog.StructuredLogger("test_a")
+    logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))
+
+    # output a bunch of stuff
+    logger.suite_start(["test-id-1"])  # no run_info arg!
+    logger.test_start("test-id-1")
+    logger.test_end("test-id-1", "PASS")
+    logger.suite_end()
+
+    # check nothing got output to stdout/stderr
+    # (note that mozlog outputs exceptions during handling to stderr!)
+    captured = capfd.readouterr()
+    assert captured.out == ""
+    assert captured.err == ""
+
+    # check the actual output of the formatter
+    output.seek(0)
+    output_obj = json.load(output)
+    assert "run_info" not in output_obj or output_obj["run_info"] == {}
index 8ece29b..441c83f 100644 (file)
@@ -1,4 +1,3 @@
-import os
 import sys
 
 from os.path import join, dirname
@@ -9,8 +8,7 @@ import pytest
 from .base import all_products, active_products
 
 sys.path.insert(0, join(dirname(__file__), "..", "..", "..", ".."))  # repo root
-from tools import localpaths
-from wptserve import sslutils
+from tools import localpaths  # noqa: flake8
 
 from wptrunner import environment
 from wptrunner import products
@@ -48,11 +46,11 @@ def test_server_start_config(product):
 
     with mock.patch.object(environment.serve, "start") as start:
         with environment.TestEnvironment(test_paths,
-                                         sslutils.environments["none"](None),
                                          False,
                                          None,
                                          env_options,
-                                         env_extras) as test_environment:
+                                         {"type": "none"},
+                                         env_extras):
             start.assert_called_once()
             args = start.call_args
             config = args[0][0]
diff --git a/WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_stability.py b/WebDriverTests/imported/w3c/tools/wptrunner/wptrunner/tests/test_stability.py
new file mode 100644 (file)
index 0000000..72cff21
--- /dev/null
@@ -0,0 +1,12 @@
+import sys
+from os.path import dirname, join
+
+sys.path.insert(0, join(dirname(__file__), "..", ".."))
+
+from wptrunner import stability
+
+def test_is_inconsistent():
+    assert stability.is_inconsistent({"PASS": 10}, 10) is False
+    assert stability.is_inconsistent({"PASS": 9}, 10) is True
+    assert stability.is_inconsistent({"PASS": 9, "FAIL": 1}, 10) is True
+    assert stability.is_inconsistent({"PASS": 8, "FAIL": 1}, 10) is True
index e5eb4cf..5c654c9 100644 (file)
-import unittest
-import StringIO
-
-import pytest
+import json
+import mock
+import os
+import sys
+from io import BytesIO
 
 from .. import metadata, manifestupdate
 from mozlog import structuredlog, handlers, formatters
 
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
+from manifest import manifest, item as manifest_item
+
+
+def rel_path_to_url(rel_path, url_base="/"):
+    assert not os.path.isabs(rel_path)
+    if url_base[0] != "/":
+        url_base = "/" + url_base
+    if url_base[-1] != "/":
+        url_base += "/"
+    return url_base + rel_path.replace(os.sep, "/")
+
+
+def SourceFileWithTest(path, hash, cls, *args):
+    s = mock.Mock(rel_path=path, hash=hash)
+    test = cls(s, rel_path_to_url(path), *args)
+    s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
+    return s
+
+
+item_classes = {"testharness": manifest_item.TestharnessTest,
+                "reftest": manifest_item.RefTest,
+                "reftest_node": manifest_item.RefTestNode,
+                "manual": manifest_item.ManualTest,
+                "stub": manifest_item.Stub,
+                "wdspec": manifest_item.WebdriverSpecTest,
+                "conformancechecker": manifest_item.ConformanceCheckerTest,
+                "visual": manifest_item.VisualTest,
+                "support": manifest_item.SupportFile}
+
+
+def update(tests, *logs):
+    id_test_map, updater = create_updater(tests)
+    for log in logs:
+        log = create_log(log)
+        updater.update_from_log(log)
 
-class TestExpectedUpdater(unittest.TestCase):
-    def create_manifest(self, data, test_path="path/to/test.ini"):
-        f = StringIO.StringIO(data)
-        return manifestupdate.compile(f, test_path)
+    return list(metadata.update_results(id_test_map,
+                                        ["debug", "os", "version", "processor", "bits"],
+                                        ["debug"],
+                                        False))
 
-    def create_updater(self, data, **kwargs):
-        expected_tree = {}
-        id_path_map = {}
-        for test_path, test_ids, manifest_str in data:
-            if isinstance(test_ids, (str, unicode)):
-                test_ids = [test_ids]
-            expected_tree[test_path] = self.create_manifest(manifest_str, test_path)
-            for test_id in test_ids:
-                id_path_map[test_id] = test_path
 
-        return metadata.ExpectedUpdater(expected_tree, id_path_map, **kwargs)
+def create_updater(tests, url_base="/", **kwargs):
+    id_test_map = {}
+    m = create_test_manifest(tests, url_base)
+    expected_data = {}
+    metadata.load_expected = lambda _, __, test_path, *args: expected_data[test_path]
 
-    def create_log(self, *args, **kwargs):
+    for test_path, test_ids, test_type, manifest_str in tests:
+        tests = list(m.iterpath(test_path))
+        if isinstance(test_ids, (str, unicode)):
+            test_ids = [test_ids]
+        test_data = metadata.TestFileData("/", "testharness", None, test_path, tests)
+        expected_data[test_path] = manifestupdate.compile(BytesIO(manifest_str),
+                                                          test_path,
+                                                          url_base)
+
+        for test_id in test_ids:
+            id_test_map[test_id] = test_data
+
+    return id_test_map, metadata.ExpectedUpdater(id_test_map, **kwargs)
+
+
+def create_log(entries):
+    data = BytesIO()
+    if isinstance(entries, list):
         logger = structuredlog.StructuredLogger("expected_test")
-        data = StringIO.StringIO()
         handler = handlers.StreamHandler(data, formatters.JSONFormatter())
         logger.add_handler(handler)
 
-        log_entries = ([("suite_start", {"tests": [], "run_info": kwargs.get("run_info", {})})] +
-                       list(args) +
-                       [("suite_end", {})])
-
-        for item in log_entries:
+        for item in entries:
             action, kwargs = item
             getattr(logger, action)(**kwargs)
         logger.remove_handler(handler)
-        data.seek(0)
-        return data
+    else:
+        json.dump(entries, data)
+    data.seek(0)
+    return data
 
 
-    def coalesce_results(self, trees):
-        for tree in trees:
-            for test in tree.iterchildren():
-                for subtest in test.iterchildren():
-                    subtest.coalesce_expected()
-                test.coalesce_expected()
+def suite_log(entries, run_info=None):
+    return ([("suite_start", {"tests": [], "run_info": run_info or {}})] +
+            entries +
+            [("suite_end", {})])
 
-    @pytest.mark.xfail
-    def test_update_0(self):
-        prev_data = [("path/to/test.htm.ini", ["/path/to/test.htm"], """[test.htm]
-  type: testharness
+
+def create_test_manifest(tests, url_base="/"):
+    source_files = []
+    for i, (test, _, test_type, _) in enumerate(tests):
+        if test_type:
+            source_files.append(SourceFileWithTest(test, str(i) * 40, item_classes[test_type]))
+    m = manifest.Manifest()
+    m.update(source_files)
+    return m
+
+
+def test_update_0():
+    tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness",
+              """[test.htm]
   [test1]
     expected: FAIL""")]
 
-        new_data = self.create_log(("test_start", {"test": "/path/to/test.htm"}),
-                                   ("test_status", {"test": "/path/to/test.htm",
-                                                    "subtest": "test1",
-                                                    "status": "PASS",
-                                                    "expected": "FAIL"}),
-                                   ("test_end", {"test": "/path/to/test.htm",
-                                                 "status": "OK"}))
-        updater = self.create_updater(prev_data)
-        updater.update_from_log(new_data)
-
-        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
-        self.coalesce_results([new_manifest])
-        self.assertTrue(new_manifest.is_empty)
-
-    @pytest.mark.xfail
-    def test_update_1(self):
-        test_id = "/path/to/test.htm"
-        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
-  type: testharness
+    log = suite_log([("test_start", {"test": "/path/to/test.htm"}),
+                     ("test_status", {"test": "/path/to/test.htm",
+                                      "subtest": "test1",
+                                      "status": "PASS",
+                                      "expected": "FAIL"}),
+                     ("test_end", {"test": "/path/to/test.htm",
+                                   "status": "OK"})])
+
+    updated = update(tests, log)
+
+    assert len(updated) == 1
+    assert updated[0][1].is_empty
+
+
+def test_update_1():
+    test_id = "/path/to/test.htm"
+    tests = [("path/to/test.htm", [test_id], "testharness",
+              """[test.htm]
   [test1]
     expected: ERROR""")]
 
-        new_data = self.create_log(("test_start", {"test": test_id}),
-                                   ("test_status", {"test": test_id,
-                                                    "subtest": "test1",
-                                                    "status": "FAIL",
-                                                    "expected": "ERROR"}),
-                                   ("test_end", {"test": test_id,
-                                                 "status": "OK"}))
-        updater = self.create_updater(prev_data)
-        updater.update_from_log(new_data)
-
-        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
-        self.coalesce_results([new_manifest])
-        self.assertFalse(new_manifest.is_empty)
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get("expected"), "FAIL")
-
-    @pytest.mark.xfail
-    def test_new_subtest(self):
-        test_id = "/path/to/test.htm"
-        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
-  type: testharness
+    log = suite_log([("test_start", {"test": test_id}),
+                     ("test_status", {"test": test_id,
+                                      "subtest": "test1",
+                                      "status": "FAIL",
+                                      "expected": "ERROR"}),
+                     ("test_end", {"test": test_id,
+                                   "status": "OK"})])
+
+    updated = update(tests, log)
+
+    new_manifest = updated[0][1]
+    assert not new_manifest.is_empty
+    assert new_manifest.get_test(test_id).children[0].get("expected") == "FAIL"
+
+
+def test_skip_0():
+    test_id = "/path/to/test.htm"
+    tests = [("path/to/test.htm", [test_id], "testharness",
+              """[test.htm]
+  [test1]
+    expected: FAIL""")]
+
+    log = suite_log([("test_start", {"test": test_id}),
+                     ("test_status", {"test": test_id,
+                                      "subtest": "test1",
+                                      "status": "FAIL",
+                                      "expected": "FAIL"}),
+                     ("test_end", {"test": test_id,
+                                   "status": "OK"})])
+
+    updated = update(tests, log)
+    assert not updated
+
+
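+# A previously unknown subtest (test2) fails unexpectedly: it should gain its
+# own expected: FAIL entry while the existing subtest's expectation is kept.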
+def test_new_subtest():
+    test_id = "/path/to/test.htm"
+    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
   [test1]
     expected: FAIL""")]
 
-        new_data = self.create_log(("test_start", {"test": test_id}),
-                                   ("test_status", {"test": test_id,
-                                                    "subtest": "test1",
-                                                    "status": "FAIL",
-                                                    "expected": "FAIL"}),
-                                   ("test_status", {"test": test_id,
-                                                    "subtest": "test2",
-                                                    "status": "FAIL",
-                                                    "expected": "PASS"}),
-                                   ("test_end", {"test": test_id,
-                                                 "status": "OK"}))
-        updater = self.create_updater(prev_data)
-        updater.update_from_log(new_data)
-
-        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
-        self.coalesce_results([new_manifest])
-        self.assertFalse(new_manifest.is_empty)
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get("expected"), "FAIL")
-        self.assertEquals(new_manifest.get_test(test_id).children[1].get("expected"), "FAIL")
-
-    @pytest.mark.xfail
-    def test_update_multiple_0(self):
-        test_id = "/path/to/test.htm"
-        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
-  type: testharness
+    log = suite_log([("test_start", {"test": test_id}),
+                     ("test_status", {"test": test_id,
+                                      "subtest": "test1",
+                                      "status": "FAIL",
+                                      "expected": "FAIL"}),
+                     ("test_status", {"test": test_id,
+                                      "subtest": "test2",
+                                      "status": "FAIL",
+                                      "expected": "PASS"}),
+                     ("test_end", {"test": test_id,
+                                   "status": "OK"})])
+    updated = update(tests, log)
+    new_manifest = updated[0][1]
+    assert not new_manifest.is_empty
+    assert new_manifest.get_test(test_id).children[0].get("expected") == "FAIL"
+    assert new_manifest.get_test(test_id).children[1].get("expected") == "FAIL"
+
+
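+# Two runs with different run_info (osx vs. linux) give different results, so
+# the updated expectation should become conditional on the platform.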
+def test_update_multiple_0():
+    test_id = "/path/to/test.htm"
+    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
   [test1]
     expected: FAIL""")]
 
-        new_data_0 = self.create_log(("test_start", {"test": test_id}),
-                                     ("test_status", {"test": test_id,
-                                                      "subtest": "test1",
-                                                      "status": "FAIL",
-                                                      "expected": "FAIL"}),
-                                     ("test_end", {"test": test_id,
-                                                   "status": "OK"}),
-                                     run_info={"debug": False, "os": "osx"})
-
-        new_data_1 = self.create_log(("test_start", {"test": test_id}),
-                                     ("test_status", {"test": test_id,
-                                                      "subtest": "test1",
-                                                      "status": "TIMEOUT",
-                                                      "expected": "FAIL"}),
-                                     ("test_end", {"test": test_id,
-                                                   "status": "OK"}),
-                                     run_info={"debug": False, "os": "linux"})
-        updater = self.create_updater(prev_data)
-
-        updater.update_from_log(new_data_0)
-        updater.update_from_log(new_data_1)
-
-        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
-
-        self.coalesce_results([new_manifest])
-
-        self.assertFalse(new_manifest.is_empty)
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": False, "os": "osx"}), "FAIL")
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": False, "os": "linux"}), "TIMEOUT")
-
-    @pytest.mark.xfail
-    def test_update_multiple_1(self):
-        test_id = "/path/to/test.htm"
-        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
-  type: testharness
+    log_0 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "FAIL",
+                                        "expected": "FAIL"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "OK"})],
+                      run_info={"debug": False, "os": "osx"})
+
+    log_1 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "TIMEOUT",
+                                        "expected": "FAIL"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "OK"})],
+                      run_info={"debug": False, "os": "linux"})
+
+    updated = update(tests, log_0, log_1)
+    new_manifest = updated[0][1]
+
+    assert not new_manifest.is_empty
+    assert new_manifest.get_test(test_id).children[0].get(
+        "expected", {"debug": False, "os": "osx"}) == "FAIL"
+    assert new_manifest.get_test(test_id).children[0].get(
+        "expected", {"debug": False, "os": "linux"}) == "TIMEOUT"
+
+
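+# Like test_update_multiple_0, but also checks that a configuration absent
+# from the logs (windows) still resolves to the default FAIL expectation.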
+def test_update_multiple_1():
+    test_id = "/path/to/test.htm"
+    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
   [test1]
     expected: FAIL""")]
 
-        new_data_0 = self.create_log(("test_start", {"test": test_id}),
-                                     ("test_status", {"test": test_id,
-                                                      "subtest": "test1",
-                                                      "status": "FAIL",
-                                                      "expected": "FAIL"}),
-                                     ("test_end", {"test": test_id,
-                                                   "status": "OK"}),
-                                     run_info={"debug": False, "os": "osx"})
-
-        new_data_1 = self.create_log(("test_start", {"test": test_id}),
-                                     ("test_status", {"test": test_id,
-                                                      "subtest": "test1",
-                                                      "status": "TIMEOUT",
-                                                      "expected": "FAIL"}),
-                                     ("test_end", {"test": test_id,
-                                                   "status": "OK"}),
-                                     run_info={"debug": False, "os": "linux"})
-        updater = self.create_updater(prev_data)
-
-        updater.update_from_log(new_data_0)
-        updater.update_from_log(new_data_1)
-
-        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
-
-        self.coalesce_results([new_manifest])
-
-        self.assertFalse(new_manifest.is_empty)
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": False, "os": "osx"}), "FAIL")
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": False, "os": "linux"}), "TIMEOUT")
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": False, "os": "windows"}), "FAIL")
-
-    @pytest.mark.xfail
-    def test_update_multiple_2(self):
-        test_id = "/path/to/test.htm"
-        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
-  type: testharness
+    log_0 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "FAIL",
+                                        "expected": "FAIL"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "OK"})],
+                      run_info={"debug": False, "os": "osx"})
+
+    log_1 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "TIMEOUT",
+                                        "expected": "FAIL"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "OK"})],
+                      run_info={"debug": False, "os": "linux"})
+
+    updated = update(tests, log_0, log_1)
+    new_manifest = updated[0][1]
+
+    assert not new_manifest.is_empty
+    assert new_manifest.get_test(test_id).children[0].get(
+        "expected", {"debug": False, "os": "osx"}) == "FAIL"
+    assert new_manifest.get_test(test_id).children[0].get(
+        "expected", {"debug": False, "os": "linux"}) == "TIMEOUT"
+    assert new_manifest.get_test(test_id).children[0].get(
+        "expected", {"debug": False, "os": "windows"}) == "FAIL"
+
+
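+# Runs differing only in the debug flag produce different results, so the
+# updated expectation should become conditional on debug.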
+def test_update_multiple_2():
+    test_id = "/path/to/test.htm"
+    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
   [test1]
     expected: FAIL""")]
 
-        new_data_0 = self.create_log(("test_start", {"test": test_id}),
-                                     ("test_status", {"test": test_id,
-                                                      "subtest": "test1",
-                                                      "status": "FAIL",
-                                                      "expected": "FAIL"}),
-                                     ("test_end", {"test": test_id,
-                                                   "status": "OK"}),
-                                     run_info={"debug": False, "os": "osx"})
-
-        new_data_1 = self.create_log(("test_start", {"test": test_id}),
-                                     ("test_status", {"test": test_id,
-                                                      "subtest": "test1",
-                                                      "status": "TIMEOUT",
-                                                      "expected": "FAIL"}),
-                                     ("test_end", {"test": test_id,
-                                                   "status": "OK"}),
-                                     run_info={"debug": True, "os": "osx"})
-        updater = self.create_updater(prev_data)
-
-        updater.update_from_log(new_data_0)
-        updater.update_from_log(new_data_1)
-
-        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
-
-        self.coalesce_results([new_manifest])
-
-        self.assertFalse(new_manifest.is_empty)
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": False, "os": "osx"}), "FAIL")
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": True, "os": "osx"}), "TIMEOUT")
-
-    @pytest.mark.xfail
-    def test_update_multiple_3(self):
-        test_id = "/path/to/test.htm"
-        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
-  type: testharness
+    log_0 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "FAIL",
+                                        "expected": "FAIL"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "OK"})],
+                      run_info={"debug": False, "os": "osx"})
+
+    log_1 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "TIMEOUT",
+                                        "expected": "FAIL"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "OK"})],
+                      run_info={"debug": True, "os": "osx"})
+
+    updated = update(tests, log_0, log_1)
+    new_manifest = updated[0][1]
+
+    assert not new_manifest.is_empty
+    assert new_manifest.get_test(test_id).children[0].get(
+        "expected", {"debug": False, "os": "osx"}) == "FAIL"
+    assert new_manifest.get_test(test_id).children[0].get(
+        "expected", {"debug": True, "os": "osx"}) == "TIMEOUT"
+
+
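+# Existing conditional expectations keyed on debug/os are replaced by the
+# per-configuration results from the new logs.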
+def test_update_multiple_3():
+    test_id = "/path/to/test.htm"
+    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
   [test1]
     expected:
       if debug: FAIL
       if not debug and os == "osx": TIMEOUT""")]
 
-        new_data_0 = self.create_log(("test_start", {"test": test_id}),
-                                     ("test_status", {"test": test_id,
-                                                      "subtest": "test1",
-                                                      "status": "FAIL",
-                                                      "expected": "FAIL"}),
-                                     ("test_end", {"test": test_id,
-                                                   "status": "OK"}),
-                                     run_info={"debug": False, "os": "osx"})
-
-        new_data_1 = self.create_log(("test_start", {"test": test_id}),
-                                     ("test_status", {"test": test_id,
-                                                      "subtest": "test1",
-                                                      "status": "TIMEOUT",
-                                                      "expected": "FAIL"}),
-                                     ("test_end", {"test": test_id,
-                                                   "status": "OK"}),
-                                     run_info={"debug": True, "os": "osx"})
-        updater = self.create_updater(prev_data)
-
-        updater.update_from_log(new_data_0)
-        updater.update_from_log(new_data_1)
-
-        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
-
-        self.coalesce_results([new_manifest])
-
-        self.assertFalse(new_manifest.is_empty)
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": False, "os": "osx"}), "FAIL")
-        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
-            "expected", {"debug": True, "os": "osx"}), "TIMEOUT")
-
-    @pytest.mark.xfail
-    def test_update_ignore_existing(self):
-        test_id = "/path/to/test.htm"
-        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
-  type: testharness
+    log_0 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "FAIL",
+                                        "expected": "FAIL"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "OK"})],
+                      run_info={"debug": False, "os": "osx"})
+
+    log_1 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "TIMEOUT",
+                                        "expected": "FAIL"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "