Import Cobalt 20.master.0.215766
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/LICENSE b/src/third_party/web_platform_tests/tools/wptrunner/LICENSE
new file mode 100644
index 0000000..45896e6
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/LICENSE
@@ -0,0 +1,30 @@
+W3C 3-clause BSD License
+
+http://www.w3.org/Consortium/Legal/2008/03-bsd-license.html
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of works must retain the original copyright notice,
+  this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the original copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+
+* Neither the name of the W3C nor the names of its contributors may be
+  used to endorse or promote products derived from this work without
+  specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/MANIFEST.in b/src/third_party/web_platform_tests/tools/wptrunner/MANIFEST.in
new file mode 100644
index 0000000..0c5e38b
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/MANIFEST.in
@@ -0,0 +1,17 @@
+exclude MANIFEST.in
+include requirements.txt
+include wptrunner/browsers/b2g_setup/*
+include wptrunner.default.ini
+include wptrunner/testharness_runner.html
+include wptrunner/testharnessreport.js
+include wptrunner/testharnessreport-servo.js
+include wptrunner/testharnessreport-servodriver.js
+include wptrunner/executors/testharness_marionette.js
+include wptrunner/executors/testharness_servodriver.js
+include wptrunner/executors/testharness_webdriver.js
+include wptrunner/executors/reftest.js
+include wptrunner/executors/reftest-wait.js
+include wptrunner/executors/reftest-wait_servodriver.js
+include wptrunner/executors/reftest-wait_webdriver.js
+include wptrunner/config.json
+include wptrunner/browsers/server-locations.txt
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/README.rst b/src/third_party/web_platform_tests/tools/wptrunner/README.rst
new file mode 100644
index 0000000..fc650ee
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/README.rst
@@ -0,0 +1,242 @@
+wptrunner: A web-platform-tests harness
+=======================================
+
+wptrunner is a harness for running the W3C `web-platform-tests testsuite`_.
+
+.. contents::
+
+Installation
+~~~~~~~~~~~~
+
+wptrunner is expected to be installed into a virtualenv using pip. For
+development, it can be installed using the `-e` option::
+
+  pip install -e ./
+
+Running the Tests
+~~~~~~~~~~~~~~~~~
+
+After installation, the command ``wptrunner`` should be available to run
+the tests.
+
+The ``wptrunner`` command takes multiple options, of which the
+following are most significant:
+
+``--product`` (defaults to `firefox`)
+  The product to test against: `b2g`, `chrome`, `firefox`, or `servo`.
+
+``--binary`` (required if product is `firefox` or `servo`)
+  The path to a binary file for the product (browser) to test against.
+
+``--webdriver-binary`` (required if product is `chrome`)
+  The path to a `driver` binary; e.g., a `chromedriver` binary.
+
+``--certutil-binary`` (required if product is `firefox` [#]_)
+  The path to a `certutil` binary (for tests that must be run over https).
+
+``--metadata`` (required)
+  The path to a directory containing test metadata. [#]_
+
+``--tests`` (required)
+  The path to a directory containing a web-platform-tests checkout.
+
+``--prefs-root`` (required only when testing a Firefox binary)
+  The path to a directory containing Firefox test-harness preferences. [#]_
+
+``--config`` (defaults to `wptrunner.default.ini`)
+  The path to the config (ini) file.
+
+.. [#] The ``--certutil-binary`` option is required when the product is
+   ``firefox`` unless ``--ssl-type=none`` is specified.
+
+.. [#] The ``--metadata`` path is to a directory that contains:
+
+  * a ``MANIFEST.json`` file (instructions on generating this file are
+    available in the `detailed documentation
+    <http://wptrunner.readthedocs.org/en/latest/usage.html#installing-wptrunner>`_);
+    and
+  * (optionally) any expectation files (see below)
+
+.. [#] Example ``--prefs-root`` value: ``~/mozilla-central/testing/profiles``.
+
+There are also a variety of other options available; use ``--help`` to
+list them.
+
+-------------------------------
+Example: How to start wptrunner
+-------------------------------
+
+To test a Firefox Nightly build in an OS X environment, you might start
+wptrunner using something similar to the following example::
+
+  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
+    --binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/dist/Nightly.app/Contents/MacOS/firefox \
+    --certutil-binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/security/nss/cmd/certutil/certutil \
+    --prefs-root=~/mozilla-central/testing/profiles
+
+And to test a Chromium build in an OS X environment, you might start
+wptrunner using something similar to the following example::
+
+  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
+    --binary=~/chromium/src/out/Release/Chromium.app/Contents/MacOS/Chromium \
+    --webdriver-binary=/usr/local/bin/chromedriver --product=chrome
+
+-------------------------------------
+Example: How to run a subset of tests
+-------------------------------------
+
+To restrict a test run just to tests in a particular web-platform-tests
+subdirectory, specify the directory name in the positional arguments after
+the options; for example, to run just the tests in the `dom` subdirectory::
+
+  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
+    --binary=/path/to/firefox --certutil-binary=/path/to/certutil \
+    --prefs-root=/path/to/testing/profiles \
+    dom
+
+Output
+~~~~~~
+
+By default wptrunner just dumps its entire output as raw JSON messages
+to stdout. This is convenient for piping into other tools, but not ideal
+for humans reading the output.
+
+As an alternative, you can use the ``--log-mach`` option, which provides
+output in a reasonable format for humans. The option requires a value:
+either the path for a file to write the `mach`-formatted output to, or
+"`-`" (a hyphen) to write the `mach`-formatted output to stdout.
+
+When using ``--log-mach``, output of the full raw JSON log is still
+available via the ``--log-raw`` option. So to output the full raw JSON
+log to a file and a human-readable summary to stdout, you might start
+wptrunner using something similar to the following example::
+
+  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
+    --binary=/path/to/firefox --certutil-binary=/path/to/certutil \
+    --prefs-root=/path/to/testing/profiles \
+    --log-raw=output.log --log-mach=-
+
+Expectation Data
+~~~~~~~~~~~~~~~~
+
+wptrunner is designed for use in environments where it is necessary
+not just to know which tests passed, but to compare the results
+between runs. For this reason it is possible to store the results of a
+previous run in a set of ini-like "expectation files". This format is
+documented below. To generate the expectation files, run `wptrunner`
+with the `--log-raw=/path/to/log/file` option; the resulting log can
+then be used as input to the `wptupdate` tool.
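+
+For example, a minimal two-step flow (the paths here are illustrative)
+is to generate a raw log from a baseline run and then feed that log to
+`wptupdate`::
+
+  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
+    --binary=/path/to/firefox --certutil-binary=/path/to/certutil \
+    --prefs-root=/path/to/testing/profiles --log-raw=baseline.log
+
+  wptupdate baseline.log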
+
+Expectation File Format
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Metadata about tests, notably including their expected results, is
+stored in a modified ini-like format that is designed to be human
+editable, but also to be machine updatable.
+
+Each test file that requires metadata to be specified (because it has
+a non-default expectation or because it is disabled, for example) has
+a corresponding expectation file in the `metadata` directory. For
+example a test file `html/test1.html` containing a failing test would
+have an expectation file called `html/test1.html.ini` in the
+`metadata` directory.
+
+An example of an expectation file is::
+
+  example_default_key: example_value
+
+  [filename.html]
+    type: testharness
+
+    [subtest1]
+      expected: FAIL
+
+    [subtest2]
+      expected:
+        if platform == 'win': TIMEOUT
+        if platform == 'osx': ERROR
+        FAIL
+
+  [filename.html?query=something]
+    type: testharness
+    disabled: bug12345
+
+The file consists of two kinds of element: key-value pairs and
+sections.
+
+Sections are delimited by headings enclosed in square brackets. Any
+closing square bracket in the heading itself may be escaped with a
+backslash. Each section may then contain any number of key-value pairs
+followed by any number of subsections. So that it is clear which data
+belongs to each section without the use of end-section markers, the
+data for each section (i.e. the key-value pairs and subsections) must
+be indented using spaces. Indentation need only be consistent, but
+using two spaces per level is recommended.
+
+In a test expectation file, each resource provided by the file has a
+single section, with the section heading being the part after the last
+`/` in the test url. Tests that have subtests may have subsections
+for those subtests, in which the heading is the name of the subtest.
+
+Simple key-value pairs are of the form::
+
+  key: value
+
+Note that unlike ini files, only `:` is a valid separator; `=` will
+not work as expected. Key-value pairs may also have conditional
+values of the form::
+
+  key:
+    if condition1: value1
+    if condition2: value2
+    default
+
+In this case each conditional is evaluated in turn, and the value used
+is the one on the right-hand side of the first matching conditional. If
+no condition matches, the unconditional default is used. If no
+condition matches and no default is provided, it is equivalent to the
+key not being present. Conditionals use a simple Python-like expression
+language, e.g.::
+
+  if debug and (platform == "linux" or platform == "osx"): FAIL
+
+For test expectations the available variables are those in the
+`run_info` data, which for desktop are `version`, `os`, `bits`,
+`processor`, `debug` and `product`.
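+
+For instance, a hedged illustration combining these `run_info`
+properties (the file and subtest names are hypothetical)::
+
+  [example.html]
+    type: testharness
+
+    [a subtest]
+      expected:
+        if os == "win" and bits == 64: FAIL
+        if debug: TIMEOUT
+        PASS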
+
+Key-value pairs specified at the top level of the file before any
+sections are special as they provide defaults for the rest of the file
+e.g.::
+
+  key1: value1
+
+  [section 1]
+    key2: value2
+
+  [section 2]
+    key1: value3
+
+In this case, inside section 1, `key1` would have the value `value1`
+and `key2` the value `value2` whereas in section 2 `key1` would have
+the value `value3` and `key2` would be undefined.
+
+The web-platform-test harness knows about several keys:
+
+`expected`
+  Must evaluate to a possible test status indicating the expected
+  result of the test. The implicit default is PASS or OK when the
+  field isn't present.
+
+`disabled`
+  Any value indicates that the test is disabled.
+
+`type`
+  The test type e.g. `testharness`, `reftest`, or `wdspec`.
+
+`reftype`
+  The type of comparison for reftests; either `==` or `!=`.
+
+`refurl`
+  The reference url for reftests.
+
+.. _`web-platform-tests testsuite`: https://github.com/w3c/web-platform-tests
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/docs/Makefile b/src/third_party/web_platform_tests/tools/wptrunner/docs/Makefile
new file mode 100644
index 0000000..d02b6c5
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/docs/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf latexpdfja text man texinfo info gettext changes xml pseudoxml linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/wptrunner.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/wptrunner.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/wptrunner"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/wptrunner"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/docs/architecture.svg b/src/third_party/web_platform_tests/tools/wptrunner/docs/architecture.svg
new file mode 100644
index 0000000..b8d5aa2
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/docs/architecture.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="780px" height="1087px" version="1.1"><defs><linearGradient x1="0%" y1="0%" x2="0%" y2="100%" id="mx-gradient-a9c4eb-1-a9c4eb-1-s-0"><stop offset="0%" style="stop-color:#A9C4EB"/><stop offset="100%" style="stop-color:#A9C4EB"/></linearGradient></defs><g transform="translate(0.5,0.5)"><rect x="498" y="498" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(500,521)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunner</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="778" width="120" height="60" fill="#f19c99" stroke="#000000" pointer-events="none"/><g transform="translate(340,801)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Product under test</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="388" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,411)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="228" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,251)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">ManagerGroup</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="658" y="608" width="120" height="60" fill="#ffce9f" stroke="#000000" pointer-events="none"/><g transform="translate(660,631)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; 
white-space: normal; text-align: center;">Executor</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="498" width="120" height="60" fill="url(#mx-gradient-a9c4eb-1-a9c4eb-1-s-0)" stroke="#000000" pointer-events="none"/><g transform="translate(340,521)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Browser</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 398 288 L 398 382" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 387 L 395 380 L 398 382 L 402 380 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 448 L 398 492" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 497 L 395 490 L 398 492 L 402 490 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 618 528 L 684 603" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 687 607 L 680 604 L 684 603 L 685 600 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="498" y="608" width="120" height="60" fill="#a9c4eb" stroke="#000000" pointer-events="none"/><g transform="translate(500,631)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">ExecutorBrowser</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 624 638 L 658 638" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 619 638 L 626 635 L 624 638 L 626 642 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 428 448 L 552 496" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 557 498 L 549 498 L 552 496 L 552 492 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 558 L 398 772" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 398 777 L 395 770 L 398 772 L 402 770 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="338" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: 
center;">run_tests</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 458 78 L 652 78" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 657 78 L 650 82 L 652 78 L 650 75 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="658" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(660,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestLoader</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="71" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(73,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestEnvironment</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="151" y="618" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(153,641)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">wptserve</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="1" y="618" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(3,641)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">pywebsocket</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 338 78 L 197 78" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 192 78 L 199 75 L 197 78 L 199 82 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 101 308 L 62 612" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 61 617 L 59 610 L 62 612 L 66 610 Z" fill="#000000" stroke="#000000" 
stroke-miterlimit="10" pointer-events="none"/><path d="M 161 308 L 204 612" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 204 617 L 200 610 L 204 612 L 207 609 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 338 823 L 61 678" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 211 678 L 338 793" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 398 108 L 398 222" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 227 L 395 220 L 398 222 L 402 220 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 706 288 L 618 513" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><rect x="658" y="388" width="70" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="693" y="412">Queue.get</text></g><path d="M 458 808 L 718 668" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><rect x="71" y="248" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(73,271)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">serve.py</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 131 108 L 131 242" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 131 247 L 128 240 L 131 242 L 135 240 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 88 973 L 132 973" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 137 973 L 130 977 L 132 973 L 130 970 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="138" y="1018" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="228" y="1037">Communication (cross process)</text></g><path d="M 88 1002 L 132 1002" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 137 1002 L 130 1006 L 132 1002 L 130 999 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="138" y="958" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="228" y="977">Ownership (same process)</text></g><path d="M 88 1033 L 138 1033" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><rect x="143" y="988" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="233" y="1007">Ownership (cross process)</text></g><rect x="428" y="966" 
width="50" height="15" fill="#e6d0de" stroke="#000000" pointer-events="none"/><rect x="428" y="990" width="50" height="15" fill="#a9c4eb" stroke="#000000" pointer-events="none"/><rect x="428" y="1015" width="50" height="15" fill="#ffce9f" stroke="#000000" pointer-events="none"/><rect x="428" y="1063" width="50" height="15" fill="#f19c99" stroke="#000000" pointer-events="none"/><rect x="428" y="1038" width="50" height="15" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><rect x="485" y="958" width="90" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="530" y="977">wptrunner class</text></g><rect x="486" y="983" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="561" y="1002">Per-product wptrunner class</text></g><rect x="486" y="1008" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="561" y="1027">Per-protocol wptrunner class</text></g><rect x="491" y="1031" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="566" y="1050">Web-platform-tests component</text></g><rect x="486" y="1055" width="90" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="531" y="1074">Browser process</text></g><path d="M 398 8 L 398 42" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 47 L 395 40 L 398 42 L 402 40 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="478" y="388" width="120" height="60" fill-opacity="0.5" fill="#e6d0de" stroke="#000000" stroke-opacity="0.5" pointer-events="none"/><g transform="translate(480,411)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 398 288 L 533 384" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 537 387 L 529 386 L 533 384 L 533 380 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="198" y="388" width="120" height="60" fill-opacity="0.5" fill="#e6d0de" stroke="#000000" stroke-opacity="0.5" pointer-events="none"/><g transform="translate(200,411)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path 
d="M 398 288 L 263 384" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 259 387 L 263 380 L 263 384 L 267 386 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="575" y="748" width="110" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="630" y="758">Browser control</text><text x="630" y="772">protocol</text><text x="630" y="786">(e.g. WebDriver)</text></g><rect x="258" y="708" width="80" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="298" y="732">HTTP</text></g><rect x="111" y="728" width="80" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="151" y="752">websockets</text></g><rect x="658" y="228" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(660,251)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Tests Queue</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 718 108 L 718 222" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 718 227 L 715 220 L 718 222 L 722 220 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 428 970 L 428 970" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/></g></svg>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/docs/conf.py b/src/third_party/web_platform_tests/tools/wptrunner/docs/conf.py
new file mode 100644
index 0000000..39e5cc4
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/docs/conf.py
@@ -0,0 +1,267 @@
+# -*- coding: utf-8 -*-
+#
+# wptrunner documentation build configuration file, created by
+# sphinx-quickstart on Mon May 19 18:14:20 2014.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'wptrunner'
+copyright = u''
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.3'
+# The full version, including alpha/beta/rc tags.
+release = '0.3'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'wptrunnerdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'wptrunner.tex', u'wptrunner Documentation',
+   u'James Graham', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'wptrunner', u'wptrunner Documentation',
+     [u'James Graham'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'wptrunner', u'wptrunner Documentation',
+   u'James Graham', 'wptrunner', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'python': ('http://docs.python.org/', None),
+                       'mozlog': ('http://mozbase.readthedocs.org/en/latest/', None)}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/docs/design.rst b/src/third_party/web_platform_tests/tools/wptrunner/docs/design.rst
new file mode 100644
index 0000000..bf108a0
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/docs/design.rst
@@ -0,0 +1,106 @@
+wptrunner Design
+================
+
+The design of wptrunner is intended to meet the following
+requirements:
+
+ * Possible to run tests from W3C web-platform-tests.
+
+ * Tests should be run as fast as possible. In particular it should
+   not be necessary to restart the browser between tests, or similar.
+
+ * As far as possible, the tests should run in a "normal" browser and
+   browsing context. In particular many tests assume that they are
+   running in a top-level browsing context, so we must avoid the use
+   of an ``iframe`` test container.
+
+ * It must be possible to deal with all kinds of behaviour of the
+   browser under test, for example, crashing, hanging, etc.
+
+ * It should be possible to add support for new platforms and browsers
+   with minimal code changes.
+
+ * It must be possible to run tests in parallel to further improve
+   performance.
+
+ * Test output must be in a machine readable form.
+
+Architecture
+------------
+
+In order to meet the above requirements, wptrunner is designed to
+push as much of the test scheduling as possible into the harness. This
+allows the harness to monitor the state of the browser and take
+appropriate action if it gets into an unwanted state, e.g. killing the
+browser if it appears to be hung.
+
+The harness will typically communicate with the browser via some remote
+control protocol such as WebDriver. However for browsers where no such
+protocol is supported, other implementation strategies are possible,
+typically at the expense of speed.
+
+The overall architecture of wptrunner is shown in the diagram below:
+
+.. image:: architecture.svg
+
+The main entry point to the code is :py:func:`run_tests` in
+``wptrunner.py``. This is responsible for setting up the test
+environment, loading the list of tests to be executed, and invoking
+the remainder of the code to actually execute some tests.
+
+The test environment is encapsulated in the
+:py:class:`TestEnvironment` class. This defers to code in
+``web-platform-tests`` which actually starts the required servers to
+run the tests.
+
+The set of tests to run is defined by the
+:py:class:`TestLoader`. This is constructed with a
+:py:class:`TestFilter` (not shown), which takes any filter arguments
+from the command line to restrict the set of tests that will be
+run. The :py:class:`TestLoader` reads both the ``web-platform-tests``
+JSON manifest and the expectation data stored in ini files and
+produces a :py:class:`multiprocessing.Queue` of tests to run, and
+their expected results.
+
+Actually running the tests happens through the
+:py:class:`ManagerGroup` object. This takes the :py:class:`Queue` of
+tests to be run and starts a :py:class:`testrunner.TestRunnerManager` for each
+instance of the browser under test that will be started. These
+:py:class:`TestRunnerManager` instances are each started in their own
+thread.
+
+A :py:class:`TestRunnerManager` coordinates starting the product under
+test, and outputting results from the test. In the case that the test
+has timed out or the browser has crashed, it has to restart the
+browser to ensure the test run can continue. The functionality for
+initialising the browser under test, and probing its state
+(e.g. whether the process is still alive) is implemented through a
+:py:class:`Browser` object. An implementation of this class must be
+provided for each product that is supported.
+
+The functionality for actually running the tests is provided by a
+:py:class:`TestRunner` object. :py:class:`TestRunner` instances are
+run in their own child process created with the
+:py:mod:`multiprocessing` module. This allows them to run concurrently
+and to be killed and restarted as required. Communication between the
+:py:class:`TestRunnerManager` and the :py:class:`TestRunner` is
+provided by a pair of queues, one for sending messages in each
+direction. In particular test results are sent from the
+:py:class:`TestRunner` to the :py:class:`TestRunnerManager` using one
+of these queues.
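+
+The following is a minimal sketch of this two-queue pattern, using
+illustrative names and message shapes rather than wptrunner's actual
+internals::
+
+  from multiprocessing import Process, Queue
+
+  def runner_loop(command_queue, result_queue):
+      # Child-process loop: pull commands from the manager, post results back.
+      while True:
+          command, data = command_queue.get()
+          if command == "stop":
+              break
+          if command == "run_test":
+              # A real TestRunner would hand the test to an Executor here.
+              result_queue.put(("test_ended", data, "PASS"))
+
+  if __name__ == "__main__":
+      command_queue, result_queue = Queue(), Queue()
+      runner = Process(target=runner_loop, args=(command_queue, result_queue))
+      runner.start()
+      command_queue.put(("run_test", "/dom/example.html"))
+      print(result_queue.get())  # ("test_ended", "/dom/example.html", "PASS")
+      command_queue.put(("stop", None))
+      runner.join()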
+
+The :py:class:`TestRunner` object is generic in that the same
+:py:class:`TestRunner` is used regardless of the product under
+test. However, the details of how to run the test may vary greatly with
+the product, since different products support different remote control
+protocols (or none at all). These protocol-specific parts are placed
+in the :py:class:`Executor` object. There is typically a different
+:py:class:`Executor` class for each combination of control protocol
+and test type. The :py:class:`TestRunner` is responsible for pulling
+each test off the :py:class:`Queue` of tests and passing it down to
+the :py:class:`Executor`.
+
+The executor often requires access to details of the particular
+browser instance that it is testing, so that it knows, for example,
+which port to connect to when sending commands to the browser. These
+details are
+encapsulated in the :py:class:`ExecutorBrowser` class.
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/docs/expectation.rst b/src/third_party/web_platform_tests/tools/wptrunner/docs/expectation.rst
new file mode 100644
index 0000000..6a0c776
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/docs/expectation.rst
@@ -0,0 +1,248 @@
+Expectation Data
+================
+
+Introduction
+------------
+
+For use in continuous integration systems, and other scenarios where
+regression tracking is required, wptrunner supports storing and
+loading the expected result of each test in a test run. Typically
+these expected results will initially be generated by running the
+testsuite in a baseline build. They may then be edited by humans as
+new features are added to the product that change the expected
+results. The expected results may also vary for a single product
+depending on the platform on which it is run. Therefore, the raw
+structured log data is not a suitable format for storing these
+files. Instead something is required that is:
+
+ * Human readable
+
+ * Human editable
+
+ * Machine readable / writable
+
+ * Capable of storing test id / result pairs
+
+ * Suitable for storing in a version control system (i.e. text-based)
+
+The need for different results per platform means either having
+multiple expectation files for each platform, or having a way to
+express conditional values within a certain file. The former would be
+rather cumbersome for humans updating the expectation files, so the
+latter approach has been adopted, leading to the requirement:
+
+ * Capable of storing result values that are conditional on the platform.
+
+There are few extant formats that meet these requirements, so
+wptrunner uses a bespoke ``expectation manifest`` format, which is
+closely based on the standard ``ini`` format.
+
+Directory Layout
+----------------
+
+Expectation manifest files must be stored under the ``metadata``
+directory passed to the test runner. The directory layout follows that
+of web-platform-tests with each test path having a corresponding
+manifest file. Tests that differ only by query string, or reftests
+with the same test path but different ref paths, share the same
+manifest file. The file name is taken from the last /-separated part
+of the path, suffixed with ``.ini``.
+
+As an optimisation, files which produce only default results
+(i.e. ``PASS`` or ``OK``) don't require a corresponding manifest file.
+
+For example a test with url::
+
+  /spec/section/file.html?query=param
+
+would have an expectation file ::
+
+  metadata/spec/section/file.html.ini
+
+
+.. _wptupdate-label:
+
+Generating Expectation Files
+----------------------------
+
+wptrunner provides the tool ``wptupdate`` to generate expectation
+files from the results of a set of baseline test runs. The basic
+syntax for this is::
+
+  wptupdate [options] [logfile]...
+
+Each ``logfile`` is a structured log file from a previous run. These
+can be generated from wptrunner using the ``--log-raw`` option
+e.g. ``--log-raw=structured.log``. The default behaviour is to update
+all the test data for the particular combination of hardware and OS
+used in the run corresponding to the log data, whilst leaving any
+other expectations untouched.
+
+wptupdate takes several useful options:
+
+``--sync``
+  Pull the latest version of web-platform-tests from the
+  upstream specified in the config file. If this is specified in
+  combination with logfiles, it is assumed that the results in the log
+  files apply to the post-update tests.
+
+``--no-check-clean``
+  Don't attempt to check if the working directory is clean before
+  doing the update (assuming that the working directory is a git or
+  mercurial tree).
+
+``--patch``
+  Create a git commit, or an mq patch, with the changes made by wptupdate.
+
+``--ignore-existing``
+  Overwrite all the expectation data for any tests that have a result
+  in the passed log files, not just data for the same platform.
+
+Examples
+~~~~~~~~
+
+Update the local copy of web-platform-tests without changing the
+expectation data and commit (or create a mq patch for) the result::
+
+  wptupdate --patch --sync
+
+Update all the expectations from a set of cross-platform test runs::
+
+  wptupdate --no-check-clean --patch osx.log linux.log windows.log
+
+Add expectation data for some new tests that are expected to be
+platform-independent::
+
+  wptupdate --no-check-clean --patch --ignore-existing tests.log
+
+Manifest Format
+---------------
+The format of the manifest files is based on the ini format. Files are
+divided into sections, each (apart from the root section) having a
+heading enclosed in square brackets. Within each section are key-value
+pairs. There are several notable differences from standard .ini files,
+however:
+
+ * Sections may be hierarchically nested, with significant whitespace
+   indicating nesting depth.
+
+ * Only ``:`` is valid as a key/value separator.
+
+A simple example of a manifest file is::
+
+  root_key: root_value
+
+  [section]
+    section_key: section_value
+
+    [subsection]
+      subsection_key: subsection_value
+
+  [another_section]
+    another_key: another_value
+
+Conditional Values
+~~~~~~~~~~~~~~~~~~
+
+In order to support values that depend on some external data, the
+right-hand side of a key/value pair can take a set of conditionals
+rather than a plain value. These values are placed on a new line
+following the key, with significant indentation. Conditional values
+are prefixed with ``if`` and the condition is terminated with a
+colon, for example::
+
+  key:
+    if cond1: value1
+    if cond2: value2
+    value3
+
+In this example, the value associated with ``key`` is determined by
+first evaluating ``cond1`` against external data. If that is true,
+``key`` is assigned the value ``value1``, otherwise ``cond2`` is
+evaluated in the same way. If both ``cond1`` and ``cond2`` are false,
+the unconditional ``value3`` is used.
+
+Conditions themselves use a Python-like expression syntax. Operands
+can be variables, corresponding to data passed in; numbers (integer
+or floating point; exponential notation is not supported); or
+quote-delimited strings. Equality is tested using ``==`` and
+inequality by ``!=``. The operators ``and``, ``or`` and ``not`` are
+used in the expected way. Parentheses can also be used for
+grouping. For example::
+
+  key:
+    if (a == 2 or a == 3) and b == "abc": value1
+    if a == 1 or b != "abc": value2
+    value3
+
+Here ``a`` and ``b`` are variables, the values of which will be
+supplied when the manifest is used.
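+
+As a minimal Python sketch of these first-match semantics (standing in
+for wptrunner's real condition parser), conditions that are also valid
+Python expressions can simply be evaluated against the supplied
+variables::
+
+  def resolve(conditions, default, variables):
+      # conditions is a list of (condition, value) pairs in file order;
+      # the first condition that evaluates true wins.
+      for cond, value in conditions:
+          if eval(cond, {"__builtins__": {}}, variables):
+              return value
+      return default
+
+  resolve([('(a == 2 or a == 3) and b == "abc"', "value1"),
+           ('a == 1 or b != "abc"', "value2")],
+          "value3", {"a": 3, "b": "abc"})
+  # -> 'value1'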
+
+Expectation Manifests
+---------------------
+
+When used for expectation data, manifests have the following format:
+
+ * A section per test URL described by the manifest, with the section
+   heading being the part of the test URL following the last ``/`` in
+   the path (this allows multiple tests in a single manifest file with
+   the same path part of the URL, but different query parts).
+
+ * A subsection per subtest, with the heading being the title of the
+   subtest.
+
+ * A key ``type`` indicating the test type. This takes the values
+   ``testharness`` and ``reftest``.
+
+ * For reftests, keys ``reftype`` indicating the reference type
+   (``==`` or ``!=``) and ``refurl`` indicating the URL of the
+   reference.
+
+ * A key ``expected`` giving the expectation value of each (sub)test.
+
+ * A key ``disabled`` which can be set to any value to indicate that
+   the (sub)test is disabled and should either not be run (for tests)
+   or have its results ignored (for subtests).
+
+ * A key ``restart-after`` which can be set to any value to indicate that
+   the runner should restart the browser after running this test (e.g. to
+   clear out unwanted state).
+
+ * Variables ``debug``, ``os``, ``version``, ``processor`` and
+   ``bits`` that describe the configuration of the browser under
+   test. ``debug`` is a boolean indicating whether a build is a debug
+   build. ``os`` is a string indicating the operating system, and
+   ``version`` a string indicating the particular version of that
+   operating system. ``processor`` is a string indicating the
+   processor architecture and ``bits`` an integer indicating the
+   number of bits. This information is typically provided by
+   :py:mod:`mozinfo`.
+
+ * Top level keys are taken as defaults for the whole file. So, for
+   example, a top level key with ``expected: FAIL`` would indicate
+   that all tests and subtests in the file are expected to fail,
+   unless they have an ``expected`` key of their own.
+
+A simple example manifest might look like::
+
+  [test.html?variant=basic]
+    type: testharness
+
+    [Test something unsupported]
+      expected: FAIL
+
+  [test.html?variant=broken]
+    expected: ERROR
+
+  [test.html?variant=unstable]
+    disabled: http://test.bugs.example.org/bugs/12345
+
+A more complex manifest with conditional properties might be::
+
+  [canvas_test.html]
+    expected:
+      if os == "osx": FAIL
+      if os == "windows" and version == "XP": FAIL
+      PASS
+
+Note that ``PASS`` in the above works, but is unnecessary; ``PASS``
+(or ``OK``) is always the default expectation for (sub)tests.
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/docs/index.rst b/src/third_party/web_platform_tests/tools/wptrunner/docs/index.rst
new file mode 100644
index 0000000..5147d3e
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/docs/index.rst
@@ -0,0 +1,24 @@
+.. wptrunner documentation master file, created by
+   sphinx-quickstart on Mon May 19 18:14:20 2014.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to wptrunner's documentation!
+=====================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   usage
+   expectation
+   design
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/docs/make.bat b/src/third_party/web_platform_tests/tools/wptrunner/docs/make.bat
new file mode 100644
index 0000000..959c161
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/docs/make.bat
@@ -0,0 +1,242 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  xml        to make Docutils-native XML files
+	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\wptrunner.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\wptrunner.qhc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdf" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdfja" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf-ja
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "texinfo" (
+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+	goto end
+)
+
+if "%1" == "gettext" (
+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+if "%1" == "xml" (
+	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The XML files are in %BUILDDIR%/xml.
+	goto end
+)
+
+if "%1" == "pseudoxml" (
+	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+	goto end
+)
+
+:end
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/docs/usage.rst b/src/third_party/web_platform_tests/tools/wptrunner/docs/usage.rst
new file mode 100644
index 0000000..8e74a43
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/docs/usage.rst
@@ -0,0 +1,238 @@
+Getting Started
+===============
+
+Installing wptrunner
+--------------------
+
+The easiest way to install wptrunner is into a virtualenv, using pip::
+
+  virtualenv wptrunner
+  cd wptrunner
+  source bin/activate
+  pip install wptrunner
+
+This will install the base dependencies for wptrunner, but not any
+extra dependencies required to test against specific browsers. In
+order to do this you must use use the extra requirements files in
+``$VIRTUAL_ENV/requirements/requirements_browser.txt``. For example,
+in order to test against Firefox you would have to run::
+
+  pip install -r requirements/requirements_firefox.txt
+
+If you intend to work on the code, the ``-e`` option to pip should be
+used in combination with a source checkout, i.e. inside a virtual
+environment created as above::
+
+  git clone https://github.com/w3c/wptrunner.git
+  cd wptrunner
+  pip install -e ./
+
+In addition to the dependencies installed by pip, wptrunner requires
+a copy of the web-platform-tests repository. This can be located
+anywhere on the filesystem, but the easiest option is to put it
+under the same parent directory as the wptrunner checkout::
+
+  git clone https://github.com/w3c/web-platform-tests.git
+
+It is also necessary to generate a web-platform-tests ``MANIFEST.json``
+file. It's recommended to also put that under the same parent directory as
+the wptrunner checkout, in a directory named ``meta``::
+
+  mkdir meta
+  cd web-platform-tests
+  python manifest --path ../meta/MANIFEST.json
+
+The ``MANIFEST.json`` file needs to be regenerated each time the
+web-platform-tests checkout is updated. To aid with the update process
+there is a tool called ``wptupdate``, which is described in
+:ref:`wptupdate-label`.
+
+Running the Tests
+-----------------
+
+A test run is started using the ``wptrunner`` command.  The command
+takes multiple options, of which the following are most significant:
+
+``--product`` (defaults to `firefox`)
+  The product to test against: `b2g`, `chrome`, `firefox`, or `servo`.
+
+``--binary`` (required if product is `firefox` or `servo`)
+  The path to a binary file for the product (browser) to test against.
+
+``--webdriver-binary`` (required if product is `chrome`)
+  The path to a `*driver` binary; e.g., a `chromedriver` binary.
+
+``--certutil-binary`` (required if product is `firefox` [#]_)
+  The path to a `certutil` binary (for tests that must be run over https).
+
+``--metadata`` (required only when not `using default paths`_)
+  The path to a directory containing test metadata. [#]_
+
+``--tests`` (required only when not `using default paths`_)
+  The path to a directory containing a web-platform-tests checkout.
+
+``--prefs-root`` (required only when testing a Firefox binary)
+  The path to a directory containing Firefox test-harness preferences. [#]_
+
+``--config`` (defaults to `wptrunner.default.ini`)
+  The path to the config (ini) file.
+
+.. [#] The ``--certutil-binary`` option is required when the product is
+   ``firefox`` unless ``--ssl-type=none`` is specified.
+
+.. [#] The ``--metadata`` path is to a directory that contains:
+
+  * a ``MANIFEST.json`` file (the web-platform-tests documentation has
+    instructions on generating this file)
+  * (optionally) any expectation files (see :ref:`wptupdate-label`)
+
+.. [#] Example ``--prefs-root`` value: ``~/mozilla-central/testing/profiles``.
+
+There are also a variety of other command-line options available; use
+``--help`` to list them.
+
+The following examples show how to start wptrunner with various options.
+
+------------------
+Starting wptrunner
+------------------
+
+The examples below assume the following directory layout,
+though no specific folder structure is required::
+
+  ~/testtwf/wptrunner          # wptrunner checkout
+  ~/testtwf/web-platform-tests # web-platform-tests checkout
+  ~/testtwf/meta               # metadata
+
+To test a Firefox Nightly build in an OS X environment, you might start
+wptrunner using something similar to the following example::
+
+  wptrunner --metadata=~/testtwf/meta/ --tests=~/testtwf/web-platform-tests/ \
+    --binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/dist/Nightly.app/Contents/MacOS/firefox \
+    --certutil-binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/security/nss/cmd/certutil/certutil \
+    --prefs-root=~/mozilla-central/testing/profiles
+
+
+And to test a Chromium build in an OS X environment, you might start
+wptrunner using something similar to the following example::
+
+  wptrunner --metadata=~/testtwf/meta/ --tests=~/testtwf/web-platform-tests/ \
+    --binary=~/chromium/src/out/Release/Chromium.app/Contents/MacOS/Chromium \
+    --webdriver-binary=/usr/local/bin/chromedriver --product=chrome
+
+--------------------
+Running test subsets
+--------------------
+
+To restrict a test run just to tests in a particular web-platform-tests
+subdirectory, specify the directory name in the positional arguments after
+the options; for example, run just the tests in the `dom` subdirectory::
+
+  wptrunner --metadata=~/testtwf/meta --tests=~/testtwf/web-platform-tests/ \
+    --binary=/path/to/firefox --certutil-binary=/path/to/certutil \
+    --prefs-root=/path/to/testing/profiles \
+    dom
+
+-------------------
+Running in parallel
+-------------------
+
+To speed up the testing process, use the ``--processes`` option to have
+wptrunner run multiple browser instances in parallel. For example, to
+have wptrunner run tests with six browser instances in parallel,
+specify ``--processes=6``. Note, however, that behaviour in this mode
+is necessarily less deterministic than with ``--processes=1`` (the
+default), so there may be more noise in the test results.
+
+-------------------
+Using default paths
+-------------------
+
+The (otherwise-required) ``--tests`` and ``--metadata`` command-line
+options/flags can be omitted if any configuration file is found that
+contains a section specifying the ``tests`` and ``metadata`` keys.
+
+See the `Configuration File`_ section for more information about
+configuration files, including information about their expected
+locations.
+
+The content of the ``wptrunner.default.ini`` default configuration file
+makes wptrunner look for tests (that is, a web-platform-tests checkout)
+as a subdirectory of the current directory named ``tests``, and for
+metadata files in a subdirectory of the current directory named ``meta``.
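+
+For reference, that behaviour is equivalent to a configuration along
+the following lines (illustrative only; see the installed
+``wptrunner.default.ini`` for the authoritative contents)::
+
+  [manifest:default]
+  tests = tests
+  metadata = meta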
+
+Output
+------
+
+wptrunner uses the :py:mod:`mozlog` package for output. This
+structures events such as test results or log messages as JSON objects
+that can then be fed to other tools for interpretation. More details
+about the message format are given in the
+:py:mod:`mozlog` documentation.
+
+By default the raw JSON messages are dumped to stdout. This is
+convenient for piping into other tools, but not ideal for humans
+reading the output. :py:mod:`mozlog` comes with several other
+formatters, which are accessible through command line options. The
+general format of these options is ``--log-name=dest``, where ``name``
+is the name of the format and ``dest`` is a path to a destination
+file, or ``-`` for stdout. The raw JSON data is written by the ``raw``
+formatter, so the default setup corresponds to ``--log-raw=-``.
+
+A reasonable output format for humans is provided as ``mach``. So in
+order to output the full raw log to a file and a human-readable
+summary to stdout, one might pass the options::
+
+  --log-raw=output.log --log-mach=-
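+
+Because each line of the raw log is a self-contained JSON object with
+an ``action`` field, it is easy to post-process without special
+tooling. For example, a small, illustrative script to count test
+results by status (assuming the standard mozlog ``test_end`` action,
+which carries the final ``status`` of each test)::
+
+  import json
+  from collections import Counter
+
+  counts = Counter()
+  with open("output.log") as f:
+      for line in f:
+          data = json.loads(line)
+          # Completed tests are reported with action == "test_end".
+          if data["action"] == "test_end":
+              counts[data["status"]] += 1
+
+  print(counts)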
+
+Configuration File
+------------------
+
+wptrunner is configured through a ``.ini`` file with three sections:
+``[products]``, ``[manifest:default]`` and ``[web-platform-tests]``.
+
+``[products]`` is used to define the set of available products. By
+default this section is empty, which means that all the products
+distributed with wptrunner are enabled (although their dependencies
+may not be installed). The set of enabled products can be restricted
+by using the product names as keys. For built-in products the value
+is empty; it is also possible to provide the path to a script
+implementing the browser functionality, e.g.::
+
+  [products]
+  chrome =
+  netscape4 = path/to/netscape.py
+
+``[manifest:default]`` specifies the default paths for the tests and metadata,
+relative to the config file. For example::
+
+  [manifest:default]
+  tests = ~/testtwf/web-platform-tests
+  metadata = ~/testtwf/meta
+
+
+``[web-platform-tests]`` is used to set the properties of the upstream
+repository when updating the tests. ``remote_url`` specifies the git
+URL to pull from; ``branch``, the branch to sync against; and
+``sync_path``, the local path, relative to the configuration file, to
+use when checking out the tests, e.g.::
+
+  [web-platform-tests]
+  remote_url = https://github.com/w3c/web-platform-tests.git
+  branch = master
+  sync_path = sync
+
+A configuration file must contain all the above fields; falling back
+to the default values for unspecified fields is not yet supported.
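+
+Putting the sections together, a complete configuration file along the
+lines of the examples above might look like::
+
+  [products]
+
+  [manifest:default]
+  tests = ~/testtwf/web-platform-tests
+  metadata = ~/testtwf/meta
+
+  [web-platform-tests]
+  remote_url = https://github.com/w3c/web-platform-tests.git
+  branch = master
+  sync_path = sync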
+
+The ``wptrunner`` and ``wptupdate`` commands will use configuration
+files in the following order:
+
+ * Any path supplied with a ``--config`` flag to the command.
+
+ * A file called ``wptrunner.ini`` in the current directory.
+
+ * The default configuration file (``wptrunner.default.ini`` in the
+   source directory).
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/requirements.txt b/src/third_party/web_platform_tests/tools/wptrunner/requirements.txt
new file mode 100644
index 0000000..e9d2a35
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/requirements.txt
@@ -0,0 +1,4 @@
+html5lib >= 0.99
+mozinfo >= 0.7
+mozlog >= 3.5
+mozdebug >= 0.1
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/requirements_chrome.txt b/src/third_party/web_platform_tests/tools/wptrunner/requirements_chrome.txt
new file mode 100644
index 0000000..a2f5442
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/requirements_chrome.txt
@@ -0,0 +1,2 @@
+mozprocess >= 0.19
+selenium >= 2.41.0
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/requirements_edge.txt b/src/third_party/web_platform_tests/tools/wptrunner/requirements_edge.txt
new file mode 100644
index 0000000..a2f5442
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/requirements_edge.txt
@@ -0,0 +1,2 @@
+mozprocess >= 0.19
+selenium >= 2.41.0
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/requirements_firefox.txt b/src/third_party/web_platform_tests/tools/wptrunner/requirements_firefox.txt
new file mode 100644
index 0000000..d8d268f
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/requirements_firefox.txt
@@ -0,0 +1,6 @@
+marionette_driver >= 0.4
+mozprofile >= 0.21
+mozprocess >= 0.19
+mozcrash >= 0.13
+mozrunner >= 6.7
+mozleak >= 0.1
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/requirements_sauce.txt b/src/third_party/web_platform_tests/tools/wptrunner/requirements_sauce.txt
new file mode 100644
index 0000000..7b828f8
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/requirements_sauce.txt
@@ -0,0 +1,2 @@
+mozprocess >= 0.19
+selenium >= 3.3.0
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/requirements_servo.txt b/src/third_party/web_platform_tests/tools/wptrunner/requirements_servo.txt
new file mode 100644
index 0000000..22bcfa1
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/requirements_servo.txt
@@ -0,0 +1 @@
+mozprocess >= 0.19
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/setup.py b/src/third_party/web_platform_tests/tools/wptrunner/setup.py
new file mode 100644
index 0000000..7ec189f
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/setup.py
@@ -0,0 +1,74 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import glob
+import os
+import sys
+import textwrap
+
+from setuptools import setup, find_packages
+
+here = os.path.split(__file__)[0]
+
+PACKAGE_NAME = 'wptrunner'
+PACKAGE_VERSION = '1.14'
+
+# Dependencies
+with open(os.path.join(here, "requirements.txt")) as f:
+    deps = f.read().splitlines()
+
+# Browser-specific requirements
+requirements_files = glob.glob("requirements_*.txt")
+
+profile_dest = None
+dest_exists = False
+
+setup(name=PACKAGE_NAME,
+      version=PACKAGE_VERSION,
+      description="Harness for running the W3C web-platform-tests against various products",
+      author='Mozilla Automation and Testing Team',
+      author_email='tools@lists.mozilla.org',
+      license='MPL 2.0',
+      packages=find_packages(exclude=["tests", "metadata", "prefs"]),
+      entry_points={
+          'console_scripts': [
+              'wptrunner = wptrunner.wptrunner:main',
+              'wptupdate = wptrunner.update:main',
+          ]
+      },
+      zip_safe=False,
+      platforms=['Any'],
+      classifiers=['Development Status :: 4 - Beta',
+                   'Environment :: Console',
+                   'Intended Audience :: Developers',
+                   'License :: OSI Approved :: BSD License',
+                   'Operating System :: OS Independent'],
+      package_data={"wptrunner": ["executors/testharness_marionette.js",
+                                  "executors/testharness_webdriver.js",
+                                  "executors/reftest.js",
+                                  "executors/reftest-wait.js",
+                                  "testharnessreport.js",
+                                  "testharness_runner.html",
+                                  "config.json",
+                                  "wptrunner.default.ini",
+                                  "browsers/server-locations.txt",
+                                  "browsers/b2g_setup/*",
+                                  "browsers/sauce_setup/*",
+                                  "prefs/*"]},
+      include_package_data=True,
+      data_files=[("requirements", requirements_files)],
+      install_requires=deps
+     )
+
+if "install" in sys.argv:
+    path = os.path.relpath(os.path.join(sys.prefix, "requirements"), os.curdir)
+    print textwrap.fill("""In order to use with one of the built-in browser
+products, you will need to install the extra dependencies. These are provided
+as requirements_[name].txt in the %s directory and can be installed using
+e.g.""" % path, 80)
+
+    print """
+
+pip install -r %s/requirements_firefox.txt
+""" % path
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_and_fail.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_and_fail.html.ini
new file mode 100644
index 0000000..81aef04
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_and_fail.html.ini
@@ -0,0 +1,3 @@
+[reftest_and_fail.html]
+  type: reftest
+  expected: FAIL
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_cycle_fail.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_cycle_fail.html.ini
new file mode 100644
index 0000000..472b33f
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_cycle_fail.html.ini
@@ -0,0 +1,3 @@
+[reftest_cycle_fail.html]
+  type: reftest
+  expected: FAIL
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_match_fail.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_match_fail.html.ini
new file mode 100644
index 0000000..f3dc336
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_match_fail.html.ini
@@ -0,0 +1,3 @@
+[reftest_match_fail.html]
+  type: reftest
+  expected: FAIL
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_mismatch_fail.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_mismatch_fail.html.ini
new file mode 100644
index 0000000..1055337
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_mismatch_fail.html.ini
@@ -0,0 +1,3 @@
+[reftest_mismatch_fail.html]
+  type: reftest
+  expected: FAIL
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_ref_timeout.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_ref_timeout.html.ini
new file mode 100644
index 0000000..8936241
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_ref_timeout.html.ini
@@ -0,0 +1,3 @@
+[reftest_ref_timeout.html]
+  type: reftest
+  expected: TIMEOUT
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_timeout.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_timeout.html.ini
new file mode 100644
index 0000000..0d1b9ba
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/reftest/reftest_timeout.html.ini
@@ -0,0 +1,3 @@
+[reftest_timeout.html]
+  type: reftest
+  expected: TIMEOUT
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/__dir__.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/__dir__.ini
new file mode 100644
index 0000000..c9d164c
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/__dir__.ini
@@ -0,0 +1,2 @@
+prefs: ["browser.display.foreground_color:#FF0000",
+        "browser.display.background_color:#000000"]
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/subdir/test_pref_reset.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/subdir/test_pref_reset.html.ini
new file mode 100644
index 0000000..6c9198d
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/subdir/test_pref_reset.html.ini
@@ -0,0 +1,2 @@
+[test_pref_reset.html]
+  prefs: [@Reset]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/test_pref_set.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/test_pref_set.html.ini
new file mode 100644
index 0000000..bc9bfb9
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/firefox/test_pref_set.html.ini
@@ -0,0 +1,3 @@
+[test_pref_set.html]
+  prefs: ["browser.display.foreground_color:#00FF00",
+          "browser.display.background_color:#000000"]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/subdir/__dir__.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/subdir/__dir__.ini
new file mode 100644
index 0000000..a9157fb
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/subdir/__dir__.ini
@@ -0,0 +1 @@
+disabled: true
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/subdir/testharness_1.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/subdir/testharness_1.html.ini
new file mode 100644
index 0000000..db93939
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/subdir/testharness_1.html.ini
@@ -0,0 +1,2 @@
+[testharness_1.html]
+  disabled: @False
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/__dir__.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/__dir__.ini
new file mode 100644
index 0000000..f599add
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/__dir__.ini
@@ -0,0 +1 @@
+tags: [dir-tag-1, dir-tag-2]
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_0.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_0.html.ini
new file mode 100644
index 0000000..fe8ffa4
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_0.html.ini
@@ -0,0 +1,4 @@
+tags: [file-tag]
+
+[testharness_0.html]
+  tags: [test-tag]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_1.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_1.html.ini
new file mode 100644
index 0000000..d6006a1
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_1.html.ini
@@ -0,0 +1,2 @@
+[testharness_0.html]
+  tags: [test-1-tag]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_2.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_2.html.ini
new file mode 100644
index 0000000..25fbf55
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/tags/testharness_2.html.ini
@@ -0,0 +1,4 @@
+tags: [file-tag]
+
+[testharness_2.html]
+  tags: [test-2-tag, @Reset]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_0.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_0.html.ini
new file mode 100644
index 0000000..90b9a6e
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_0.html.ini
@@ -0,0 +1,4 @@
+[testharness_0.html]
+  type: testharness
+  [Test that should fail]
+    expected: FAIL
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_error.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_error.html.ini
new file mode 100644
index 0000000..fa53e07
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_error.html.ini
@@ -0,0 +1,3 @@
+[testharness_error.html]
+  type: testharness
+  expected: ERROR
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_timeout.html.ini b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_timeout.html.ini
new file mode 100644
index 0000000..55eca51
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/metadata/testharness/testharness_timeout.html.ini
@@ -0,0 +1,3 @@
+[testharness_timeout.html]
+  type: testharness
+  expected: TIMEOUT
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/test.cfg.example b/src/third_party/web_platform_tests/tools/wptrunner/test/test.cfg.example
new file mode 100644
index 0000000..db48226
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/test.cfg.example
@@ -0,0 +1,20 @@
+[general]
+tests=/path/to/web-platform-tests/
+metadata=/path/to/web-platform-tests/
+ssl-type=none
+
+# [firefox]
+# binary=/path/to/firefox
+# prefs-root=/path/to/gecko-src/testing/profiles/
+
+# [servo]
+# binary=/path/to/servo-src/target/release/servo
+# exclude=testharness # Because it needs a special testharness.js
+
+# [servodriver]
+# binary=/path/to/servo-src/target/release/servo
+# exclude=testharness # Because it needs a special testharness.js
+
+# [chrome]
+# binary=/path/to/chrome
+# webdriver-binary=/path/to/chromedriver
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/test.py b/src/third_party/web_platform_tests/tools/wptrunner/test/test.py
new file mode 100644
index 0000000..034e317
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/test.py
@@ -0,0 +1,166 @@
+import ConfigParser
+import argparse
+import json
+import os
+import sys
+import tempfile
+import threading
+import time
+from StringIO import StringIO
+
+from mozlog import structuredlog, reader
+from mozlog.handlers import BaseHandler, StreamHandler, StatusHandler
+from mozlog.formatters import MachFormatter
+from wptrunner import wptcommandline, wptrunner
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+def setup_wptrunner_logging(logger):
+    structuredlog.set_default_logger(logger)
+    wptrunner.logger = logger
+    wptrunner.wptlogging.setup_stdlib_logger()
+
+class ResultHandler(BaseHandler):
+    def __init__(self, verbose=False, logger=None):
+        self.inner = StreamHandler(sys.stdout, MachFormatter())
+        BaseHandler.__init__(self, self.inner)
+        self.product = None
+        self.verbose = verbose
+        self.logger = logger
+
+        self.register_message_handlers("wptrunner-test", {"set-product": self.set_product})
+
+    def set_product(self, product):
+        self.product = product
+
+    def __call__(self, data):
+        if self.product is not None and data["action"] in ["suite_start", "suite_end"]:
+            # Hack: mozlog sets some internal state to prevent multiple suite_start or
+            # suite_end messages. We actually want that here (one from the metaharness
+            # and one from the individual test type harness), so override that internal
+            # state (a better solution might be to not share loggers, but this works well
+            # enough)
+            self.logger._state.suite_started = True
+            return
+
+        if (not self.verbose and
+            (data["action"] == "process_output" or
+             data["action"] == "log" and data["level"] not in ["error", "critical"])):
+            return
+
+        if "test" in data:
+            data = data.copy()
+            data["test"] = "%s: %s" % (self.product, data["test"])
+
+        return self.inner(data)
+
+def test_settings():
+    return {
+        "include": "_test",
+        "manifest-update": "",
+        "no-capture-stdio": ""
+    }
+
+def read_config():
+    parser = ConfigParser.ConfigParser()
+    parser.read("test.cfg")
+
+    rv = {"general":{},
+          "products":{}}
+
+    rv["general"].update(dict(parser.items("general")))
+
+    # This only allows one product per whatever for now
+    for product in parser.sections():
+        if product != "general":
+            dest = rv["products"][product] = {}
+            for key, value in parser.items(product):
+                rv["products"][product][key] = value
+
+    return rv
+
+def run_tests(product, kwargs):
+    kwargs["test_paths"]["/_test/"] = {"tests_path": os.path.join(here, "testdata"),
+                                       "metadata_path": os.path.join(here, "metadata")}
+
+    wptrunner.run_tests(**kwargs)
+
+def settings_to_argv(settings):
+    rv = []
+    for name, value in settings.iteritems():
+        key = "--%s" % name
+        if not value:
+            rv.append(key)
+        elif isinstance(value, list):
+            for item in value:
+                rv.extend([key, item])
+        else:
+            rv.extend([key, value])
+    return rv
+
+def set_from_args(settings, args):
+    if args.test:
+        settings["include"] = args.test
+    if args.tags:
+        settings["tags"] = args.tags
+
+def run(config, args):
+    logger = structuredlog.StructuredLogger("web-platform-tests")
+    logger.add_handler(ResultHandler(logger=logger, verbose=args.verbose))
+    setup_wptrunner_logging(logger)
+
+    parser = wptcommandline.create_parser()
+
+    logger.suite_start(tests=[])
+
+    for product, product_settings in config["products"].iteritems():
+        if args.product and product not in args.product:
+            continue
+
+        settings = test_settings()
+        settings.update(config["general"])
+        settings.update(product_settings)
+        settings["product"] = product
+        set_from_args(settings, args)
+
+        kwargs = vars(parser.parse_args(settings_to_argv(settings)))
+        wptcommandline.check_args(kwargs)
+
+        logger.send_message("wptrunner-test", "set-product", product)
+
+        run_tests(product, kwargs)
+
+    logger.send_message("wptrunner-test", "set-product", None)
+    logger.suite_end()
+
+def get_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-v", "--verbose", action="store_true", default=False,
+                        help="verbose log output")
+    parser.add_argument("--product", action="append",
+                        help="Specific product to include in test run")
+    parser.add_argument("--pdb", action="store_true",
+                        help="Invoke pdb on uncaught exception")
+    parser.add_argument("--tag", action="append", dest="tags",
+                        help="tags to select tests")
+    parser.add_argument("test", nargs="*",
+                        help="Specific tests to include in test run")
+    return parser
+
+def main():
+    config = read_config()
+
+    args = get_parser().parse_args()
+
+    try:
+        run(config, args)
+    except Exception:
+        if args.pdb:
+            import pdb, traceback
+            print traceback.format_exc()
+            pdb.post_mortem()
+        else:
+            raise
+
+if __name__ == "__main__":
+    main()
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/green-ref.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/green-ref.html
new file mode 100644
index 0000000..0e145d6
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/green-ref.html
@@ -0,0 +1,4 @@
+<link rel=match href=green.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/green.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/green.html
new file mode 100644
index 0000000..38167bb
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/green.html
@@ -0,0 +1,3 @@
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/red.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/red.html
new file mode 100644
index 0000000..2b677e0
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/red.html
@@ -0,0 +1,3 @@
+<style>
+:root {background-color:red}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest.https.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest.https.html
new file mode 100644
index 0000000..5a45f10
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest.https.html
@@ -0,0 +1,9 @@
+<link rel=match href=green.html>
+<style>
+:root {background-color:red}
+</style>
+<script>
+if (window.location.protocol === "https:") {
+   document.documentElement.style.backgroundColor = "green";
+}
+</script>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_and_fail.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_and_fail.html
new file mode 100644
index 0000000..2960195
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_and_fail.html
@@ -0,0 +1,5 @@
+<title>Reftest chain that should fail</title>
+<link rel=match href=reftest_and_fail_0-ref.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_and_fail_0-ref.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_and_fail_0-ref.html
new file mode 100644
index 0000000..04fb9aa
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_and_fail_0-ref.html
@@ -0,0 +1,5 @@
+<title>Reftest chain that should fail</title>
+<link rel=match href=red.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle.html
new file mode 100644
index 0000000..4a84a3b
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle.html
@@ -0,0 +1,5 @@
+<title>Reftest with cycle, all match</title>
+<link rel=match href=reftest_cycle_0-ref.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_0-ref.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_0-ref.html
new file mode 100644
index 0000000..118bfd8
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_0-ref.html
@@ -0,0 +1,5 @@
+<title>OR match that should pass</title>
+<link rel=match href=reftest_cycle_1-ref.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_1-ref.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_1-ref.html
new file mode 100644
index 0000000..59be0b6
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_1-ref.html
@@ -0,0 +1,5 @@
+<title>Reftest with cycle, all match</title>
+<link rel=match href=reftest_cycle.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_fail.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_fail.html
new file mode 100644
index 0000000..175e76c
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_fail.html
@@ -0,0 +1,5 @@
+<title>Reftest with cycle, fails</title>
+<link rel=match href=reftest_cycle_fail_0-ref.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_fail_0-ref.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_fail_0-ref.html
new file mode 100644
index 0000000..c8e548c
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_cycle_fail_0-ref.html
@@ -0,0 +1,5 @@
+<title>Reftest with cycle, fails</title>
+<link rel=mismatch href=reftest_cycle_fail.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_match.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_match.html
new file mode 100644
index 0000000..333cc6c
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_match.html
@@ -0,0 +1,5 @@
+<title>rel=match that should pass</title>
+<link rel=match href=green.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_match_fail.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_match_fail.html
new file mode 100644
index 0000000..a9272ef
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_match_fail.html
@@ -0,0 +1,5 @@
+<title>rel=match that should fail</title>
+<link rel=match href=red.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_mismatch.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_mismatch.html
new file mode 100644
index 0000000..af5fa07
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_mismatch.html
@@ -0,0 +1,5 @@
+<title>rel=mismatch that should pass</title>
+<link rel=mismatch href=red.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_mismatch_fail.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_mismatch_fail.html
new file mode 100644
index 0000000..8d160c4
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_mismatch_fail.html
@@ -0,0 +1,5 @@
+<title>rel=mismatch that should fail</title>
+<link rel=mismatch href=green.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_or_0.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_or_0.html
new file mode 100644
index 0000000..3a51de2
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_or_0.html
@@ -0,0 +1,6 @@
+<title>OR match that should pass</title>
+<link rel=match href=red.html>
+<link rel=match href=green.html>
+<style>
+:root {background-color:green}
+</style>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_ref_timeout-ref.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_ref_timeout-ref.html
new file mode 100644
index 0000000..04cbb71
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_ref_timeout-ref.html
@@ -0,0 +1,6 @@
+<html class="reftest-wait">
+<title>rel=match that should time out in the ref</title>
+<link rel=match href=reftest_ref_timeout-ref.html>
+<style>
+:root {background-color:green}
+</style>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_ref_timeout.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_ref_timeout.html
new file mode 100644
index 0000000..aaf68f5
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_ref_timeout.html
@@ -0,0 +1,6 @@
+<html>
+<title>rel=match that should time out in the ref</title>
+<link rel=match href=reftest_ref_timeout-ref.html>
+<style>
+:root {background-color:green}
+</style>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_timeout.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_timeout.html
new file mode 100644
index 0000000..b10e676
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_timeout.html
@@ -0,0 +1,6 @@
+<html class="reftest-wait">
+<title>rel=match that should timeout</title>
+<link rel=match href=green.html>
+<style>
+:root {background-color:green}
+</style>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_wait_0.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_wait_0.html
new file mode 100644
index 0000000..4f92715
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/reftest/reftest_wait_0.html
@@ -0,0 +1,13 @@
+<html class="reftest-wait">
+<title>rel=match that should fail</title>
+<link rel=match href=red.html>
+<style>
+:root {background-color:red}
+</style>
+<script>
+setTimeout(function() {
+  document.documentElement.style.backgroundColor = "green";
+  document.documentElement.className = "";
+}, 2000);
+</script>
+</html>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/subdir/test_pref_inherit.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/subdir/test_pref_inherit.html
new file mode 100644
index 0000000..10b2851
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/subdir/test_pref_inherit.html
@@ -0,0 +1,10 @@
+<!doctype html>
+<title>Example pref test</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
+<script>
+test(function() {
+  assert_equals(getComputedStyle(document.body).color, "rgb(255, 0, 0)");
+}, "Test that pref was set");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/subdir/test_pref_reset.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/subdir/test_pref_reset.html
new file mode 100644
index 0000000..5c75c11
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/subdir/test_pref_reset.html
@@ -0,0 +1,10 @@
+<!doctype html>
+<title>Example pref test</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
+<script>
+test(function() {
+  assert_equals(getComputedStyle(document.body).color, "rgb(0, 0, 0)");
+}, "Test that pref was reset");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/test_pref_dir.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/test_pref_dir.html
new file mode 100644
index 0000000..105d907
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/test_pref_dir.html
@@ -0,0 +1,10 @@
+<!doctype html>
+<title>Example pref test</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<p>Test requires the pref browser.display.foreground_color to be set to #FF0000</p>
+<script>
+test(function() {
+  assert_equals(getComputedStyle(document.body).color, "rgb(255, 0, 0)");
+}, "Test that pref was set");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/test_pref_set.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/test_pref_set.html
new file mode 100644
index 0000000..8e5e298
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/firefox/test_pref_set.html
@@ -0,0 +1,10 @@
+<!doctype html>
+<title>Example pref test</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
+<script>
+test(function() {
+  assert_equals(getComputedStyle(document.body).color, "rgb(0, 255, 0)");
+}, "Test that pref was set");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/subdir/testharness_1.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/subdir/testharness_1.html
new file mode 100644
index 0000000..fd2fc43
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/subdir/testharness_1.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<title>Test should be enabled</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function() {
+  assert_true(true);
+}, "Test that should pass");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_0.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_0.html
new file mode 100644
index 0000000..5daf02a
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_0.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<title>Test</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function() {
+  assert_true(true);
+}, "Test that should pass");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_1.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_1.html
new file mode 100644
index 0000000..5daf02a
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_1.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<title>Test</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function() {
+  assert_true(true);
+}, "Test that should pass");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_2.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_2.html
new file mode 100644
index 0000000..5daf02a
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/tags/testharness_2.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<title>Test</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function() {
+  assert_true(true);
+}, "Test that should pass");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness.https.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness.https.html
new file mode 100644
index 0000000..5871eac
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness.https.html
@@ -0,0 +1,10 @@
+<!doctype html>
+<title>Example https test</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function() {
+  assert_equals(window.location.protocol, "https:");
+}, "Test that file was loaded with the correct protocol");
+
+</script>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_0.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_0.html
new file mode 100644
index 0000000..ff0654c
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_0.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<title>Test should be disabled</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function() {
+  assert_true(false);
+}, "Test that should fail");
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_error.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_error.html
new file mode 100644
index 0000000..0ac5ba4
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_error.html
@@ -0,0 +1,7 @@
+<!doctype html>
+<title>testharness.js test that should error</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+undefined_function()
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_long_timeout.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_long_timeout.html
new file mode 100644
index 0000000..fc94e05
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_long_timeout.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<title>testharness.js test with long timeout</title>
+<meta name=timeout content=long>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+var t = async_test("Long timeout test");
+setTimeout(t.step_func_done(function() {assert_true(true)}), 15*1000);
+</script>
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_timeout.html b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_timeout.html
new file mode 100644
index 0000000..b99915a
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/test/testdata/testharness/testharness_timeout.html
@@ -0,0 +1,6 @@
+<!doctype html>
+<title>Simple testharness.js usage</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+
+// This file should time out, obviously
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/tox.ini b/src/third_party/web_platform_tests/tools/wptrunner/tox.ini
new file mode 100644
index 0000000..7909704
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/tox.ini
@@ -0,0 +1,17 @@
+[pytest]
+xfail_strict=true
+
+[tox]
+envlist = {py27,pypy}-{base,b2g,chrome,firefox,servo}
+
+[testenv]
+deps =
+     pytest>=2.9
+     pytest-cov
+     pytest-xdist
+     -r{toxinidir}/requirements.txt
+     chrome: -r{toxinidir}/requirements_chrome.txt
+     firefox: -r{toxinidir}/requirements_firefox.txt
+     servo: -r{toxinidir}/requirements_servo.txt
+
+commands = pytest --cov
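+
+# Usage sketch (illustrative, not part of the upstream file): run a single
+# environment from the matrix above, e.g. the Firefox configuration:
+#
+#   tox -e py27-firefox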
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner.default.ini b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner.default.ini
new file mode 100644
index 0000000..34d25f8
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner.default.ini
@@ -0,0 +1,11 @@
+[products]
+
+[web-platform-tests]
+remote_url = https://github.com/w3c/web-platform-tests.git
+branch = master
+sync_path = %(pwd)s/sync
+
+[manifest:default]
+tests = %(pwd)s/tests
+metadata = %(pwd)s/meta
+url_base = /
\ No newline at end of file
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/__init__.py
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/__init__.py
new file mode 100644
index 0000000..e3606d2
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/__init__.py
@@ -0,0 +1,30 @@
+"""Subpackage where each product is defined. Each product is created by adding a
+a .py file containing a __wptrunner__ variable in the global scope. This must be
+a dictionary with the fields
+
+"product": Name of the product, assumed to be unique.
+"browser": String indicating the Browser implementation used to launch that
+           product.
+"executor": Dictionary with keys as supported test types and values as the name
+            of the Executor implemantation that will be used to run that test
+            type.
+"browser_kwargs": String naming function that takes product, binary,
+                  prefs_root and the wptrunner.run_tests kwargs dict as arguments
+                  and returns a dictionary of kwargs to use when creating the
+                  Browser class.
+"executor_kwargs": String naming a function that takes http server url and
+                   timeout multiplier and returns kwargs to use when creating
+                   the executor class.
+"env_options": String naming a funtion of no arguments that returns the
+               arguments passed to the TestEnvironment.
+
+All classes and functions named in the above dict must be imported into the
+module global scope.
+"""
+
+product_list = ["chrome",
+                "edge",
+                "firefox",
+                "sauce",
+                "servo",
+                "servodriver"]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/b2g_setup/certtest_app.zip b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/b2g_setup/certtest_app.zip
new file mode 100644
index 0000000..f9cbd53
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/b2g_setup/certtest_app.zip
Binary files differ
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/base.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/base.py
new file mode 100644
index 0000000..e4c9c30
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/base.py
@@ -0,0 +1,166 @@
+import os
+import platform
+import socket
+from abc import ABCMeta, abstractmethod
+
+from ..wptcommandline import require_arg
+
+here = os.path.split(__file__)[0]
+
+
+def cmd_arg(name, value=None):
+    prefix = "-" if platform.system() == "Windows" else "--"
+    rv = prefix + name
+    if value is not None:
+        rv += "=" + value
+    return rv
+
+
+def get_free_port(start_port, exclude=None):
+    """Get the first port number after start_port (inclusive) that is
+    not currently bound.
+
+    :param start_port: Integer port number at which to start testing.
+    :param exclude: Set of port numbers to skip"""
+    port = start_port
+    while True:
+        if exclude and port in exclude:
+            port += 1
+            continue
+        s = socket.socket()
+        try:
+            s.bind(("127.0.0.1", port))
+        except socket.error:
+            port += 1
+        else:
+            return port
+        finally:
+            s.close()
+
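+# Example (illustrative): reserve two distinct ports, skipping any already
+# handed out:
+#
+#   used_ports = set()
+#   port_a = get_free_port(8000, exclude=used_ports)
+#   used_ports.add(port_a)
+#   port_b = get_free_port(8000, exclude=used_ports)
+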
+def browser_command(binary, args, debug_info):
+    if debug_info:
+        if debug_info.requiresEscapedArgs:
+            args = [item.replace("&", "\\&") for item in args]
+        debug_args = [debug_info.path] + debug_info.args
+    else:
+        debug_args = []
+
+    command = [binary] + args
+
+    return debug_args, command
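+
+# Example (illustrative): with no debugger attached, browser_command returns
+# an empty debug prefix plus the plain command line:
+#
+#   browser_command("/usr/bin/firefox", ["-marionette"], None)
+#   # -> ([], ["/usr/bin/firefox", "-marionette"])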
+
+
+class BrowserError(Exception):
+    pass
+
+
+class Browser(object):
+    __metaclass__ = ABCMeta
+
+    process_cls = None
+    init_timeout = 30
+
+    def __init__(self, logger):
+        """Abstract class serving as the basis for Browser implementations.
+
+        The Browser is used in the TestRunnerManager to start and stop the browser
+        process, and to check the state of that process. This class also acts as a
+        context manager, enabling it to do browser-specific setup at the start of
+        the testrun and cleanup after the run is complete.
+
+        :param logger: Structured logger to use for output.
+        """
+        self.logger = logger
+
+    def __enter__(self):
+        self.setup()
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.cleanup()
+
+    def setup(self):
+        """Used for browser-specific setup that happens at the start of a test run"""
+        pass
+
+    def settings(self, test):
+        return {}
+
+    @abstractmethod
+    def start(self, **kwargs):
+        """Launch the browser object and get it into a state where is is ready to run tests"""
+        pass
+
+    @abstractmethod
+    def stop(self, force=False):
+        """Stop the running browser process."""
+        pass
+
+    @abstractmethod
+    def pid(self):
+        """pid of the browser process or None if there is no pid"""
+        pass
+
+    @abstractmethod
+    def is_alive(self):
+        """Boolean indicating whether the browser process is still running"""
+        pass
+
+    def setup_ssl(self, hosts):
+        """Return a certificate to use for tests requiring ssl that will be trusted by the browser"""
+        raise NotImplementedError("ssl testing not supported")
+
+    def cleanup(self):
+        """Browser-specific cleanup that is run after the testrun is finished"""
+        pass
+
+    def executor_browser(self):
+        """Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
+        with which it should be instantiated"""
+        return ExecutorBrowser, {}
+
+    def check_for_crashes(self):
+        """Check for crashes that didn't cause the browser process to terminate"""
+        return False
+
+    def log_crash(self, process, test):
+        """Log information about a crash that happened in the browser."""
+        self.logger.crash(process, test)
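+
+# Usage sketch (illustrative): the TestRunnerManager drives a concrete
+# Browser subclass as a context manager, roughly:
+#
+#   with SomeBrowser(logger) as browser:  # hypothetical subclass
+#       browser.start()
+#       assert browser.is_alive()
+#       browser.stop()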
+
+
+class NullBrowser(Browser):
+    """No-op browser to use in scenarios where the TestRunnerManager shouldn't
+    actually own the browser process (e.g. Servo, where one browser is started
+    per test)."""
+
+    def __init__(self, logger, **kwargs):
+        super(NullBrowser, self).__init__(logger)
+
+    def start(self, **kwargs):
+        pass
+
+    def stop(self, force=False):
+        pass
+
+    def pid(self):
+        return None
+
+    def is_alive(self):
+        return True
+
+    def on_output(self, line):
+        raise NotImplementedError
+
+
+class ExecutorBrowser(object):
+    def __init__(self, **kwargs):
+        """View of the Browser used by the Executor object.
+        This is needed because the Executor runs in a child process and
+        we can't ship Browser instances between processes on Windows.
+
+        Typically this will have a few product-specific properties set,
+        but in some cases it may have more elaborate methods for setting
+        up the browser from the runner process.
+        """
+        for k, v in kwargs.iteritems():
+            setattr(self, k, v)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/chrome.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/chrome.py
new file mode 100644
index 0000000..a1bbd56
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/chrome.py
@@ -0,0 +1,97 @@
+from .base import Browser, ExecutorBrowser, require_arg
+from ..webdriver_server import ChromeDriverServer
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,
+                                          SeleniumRefTestExecutor)
+
+
+__wptrunner__ = {"product": "chrome",
+                 "check_args": "check_args",
+                 "browser": "ChromeBrowser",
+                 "executor": {"testharness": "SeleniumTestharnessExecutor",
+                              "reftest": "SeleniumRefTestExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_extras": "env_extras",
+                 "env_options": "env_options"}
+
+
+def check_args(**kwargs):
+    require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(test_type, run_info_data, **kwargs):
+    return {"binary": kwargs["binary"],
+            "webdriver_binary": kwargs["webdriver_binary"],
+            "webdriver_args": kwargs.get("webdriver_args")}
+
+
+def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                    **kwargs):
+    from selenium.webdriver import DesiredCapabilities
+
+    executor_kwargs = base_executor_kwargs(test_type, server_config,
+                                           cache_manager, **kwargs)
+    executor_kwargs["close_after_done"] = True
+    capabilities = dict(DesiredCapabilities.CHROME.items())
+    capabilities.setdefault("chromeOptions", {})["prefs"] = {
+        "profile": {
+            "default_content_setting_values": {
+                "popups": 1
+            }
+        }
+    }
+    for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
+        if kwargs[kwarg] is not None:
+            capabilities["chromeOptions"][capability] = kwargs[kwarg]
+    if test_type == "testharness":
+        capabilities["chromeOptions"]["useAutomationExtension"] = False
+        capabilities["chromeOptions"]["excludeSwitches"] = ["enable-automation"]
+    executor_kwargs["capabilities"] = capabilities
+    return executor_kwargs
+
+
+def env_extras(**kwargs):
+    return []
+
+
+def env_options():
+    return {"host": "web-platform.test",
+            "bind_hostname": "true"}
+
+
+class ChromeBrowser(Browser):
+    """Chrome is backed by chromedriver, which is supplied through
+    ``wptrunner.webdriver.ChromeDriverServer``.
+    """
+
+    def __init__(self, logger, binary, webdriver_binary="chromedriver",
+                 webdriver_args=None):
+        """Creates a new representation of Chrome.  The `binary` argument gives
+        the browser binary to use for testing."""
+        Browser.__init__(self, logger)
+        self.binary = binary
+        self.server = ChromeDriverServer(self.logger,
+                                         binary=webdriver_binary,
+                                         args=webdriver_args)
+
+    def start(self, **kwargs):
+        self.server.start(block=False)
+
+    def stop(self, force=False):
+        self.server.stop(force=force)
+
+    def pid(self):
+        return self.server.pid
+
+    def is_alive(self):
+        # TODO(ato): This only indicates the driver is alive,
+        # and doesn't say anything about whether a browser session
+        # is active.
+        return self.server.is_alive()
+
+    def cleanup(self):
+        self.stop()
+
+    def executor_browser(self):
+        return ExecutorBrowser, {"webdriver_url": self.server.url}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/edge.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/edge.py
new file mode 100644
index 0000000..fdc7a77
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/edge.py
@@ -0,0 +1,74 @@
+from .base import Browser, ExecutorBrowser, require_arg
+from ..webdriver_server import EdgeDriverServer
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,
+                                          SeleniumRefTestExecutor)
+
+__wptrunner__ = {"product": "edge",
+                 "check_args": "check_args",
+                 "browser": "EdgeBrowser",
+                 "executor": {"testharness": "SeleniumTestharnessExecutor",
+                              "reftest": "SeleniumRefTestExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_extras": "env_extras",
+                 "env_options": "env_options"}
+
+
+def check_args(**kwargs):
+    require_arg(kwargs, "webdriver_binary")
+
+def browser_kwargs(test_type, run_info_data, **kwargs):
+    return {"webdriver_binary": kwargs["webdriver_binary"],
+            "webdriver_args": kwargs.get("webdriver_args")}
+
+def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                    **kwargs):
+    from selenium.webdriver import DesiredCapabilities
+
+    executor_kwargs = base_executor_kwargs(test_type, server_config,
+                                           cache_manager, **kwargs)
+    executor_kwargs["close_after_done"] = True
+    executor_kwargs["capabilities"] = dict(DesiredCapabilities.EDGE.items())
+    return executor_kwargs
+
+def env_extras(**kwargs):
+    return []
+
+def env_options():
+    return {"host": "web-platform.test",
+            "bind_hostname": "true",
+            "supports_debugger": False}
+
+class EdgeBrowser(Browser):
+    used_ports = set()
+
+    def __init__(self, logger, webdriver_binary, webdriver_args=None):
+        Browser.__init__(self, logger)
+        self.server = EdgeDriverServer(self.logger,
+                                       binary=webdriver_binary,
+                                       args=webdriver_args)
+        self.webdriver_host = "localhost"
+        self.webdriver_port = self.server.port
+
+    def start(self, **kwargs):
+        self.logger.debug(self.server.url)
+        self.server.start()
+
+    def stop(self, force=False):
+        self.server.stop(force=force)
+
+    def pid(self):
+        return self.server.pid
+
+    def is_alive(self):
+        # TODO(ato): This only indicates the server is alive,
+        # and doesn't say anything about whether a browser session
+        # is active.
+        return self.server.is_alive()
+
+    def cleanup(self):
+        self.stop()
+
+    def executor_browser(self):
+        return ExecutorBrowser, {"webdriver_url": self.server.url}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/firefox.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/firefox.py
new file mode 100644
index 0000000..5ec2b70
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/firefox.py
@@ -0,0 +1,381 @@
+import os
+import platform
+import signal
+import subprocess
+import sys
+
+import mozinfo
+import mozleak
+from mozprocess import ProcessHandler
+from mozprofile import FirefoxProfile, Preferences
+from mozprofile.permissions import ServerLocations
+from mozrunner import FirefoxRunner
+from mozrunner.utils import get_stack_fixer_function
+from mozcrash import mozcrash
+
+from .base import (get_free_port,
+                   Browser,
+                   ExecutorBrowser,
+                   require_arg,
+                   cmd_arg,
+                   browser_command)
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executormarionette import (MarionetteTestharnessExecutor,
+                                            MarionetteRefTestExecutor,
+                                            MarionetteWdspecExecutor)
+from ..environment import hostnames
+
+
+here = os.path.join(os.path.split(__file__)[0])
+
+__wptrunner__ = {"product": "firefox",
+                 "check_args": "check_args",
+                 "browser": "FirefoxBrowser",
+                 "executor": {"testharness": "MarionetteTestharnessExecutor",
+                              "reftest": "MarionetteRefTestExecutor",
+                              "wdspec": "MarionetteWdspecExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_extras": "env_extras",
+                 "env_options": "env_options",
+                 "run_info_extras": "run_info_extras",
+                 "update_properties": "update_properties"}
+
+
+def get_timeout_multiplier(test_type, run_info_data, **kwargs):
+    if kwargs["timeout_multiplier"] is not None:
+        return kwargs["timeout_multiplier"]
+    if test_type == "reftest":
+        if run_info_data["debug"] or run_info_data.get("asan"):
+            return 4
+        else:
+            return 2
+    elif run_info_data["debug"] or run_info_data.get("asan"):
+        return 3
+    return 1
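+
+# Worked examples (illustrative): with no explicit timeout_multiplier, an opt
+# testharness run gets multiplier 1, a debug or ASan run gets 3, and a debug
+# or ASan reftest run gets 4 (2 for opt reftests).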
+
+
+def check_args(**kwargs):
+    require_arg(kwargs, "binary")
+    if kwargs["ssl_type"] != "none":
+        require_arg(kwargs, "certutil_binary")
+
+
+def browser_kwargs(test_type, run_info_data, **kwargs):
+    return {"binary": kwargs["binary"],
+            "prefs_root": kwargs["prefs_root"],
+            "extra_prefs": kwargs["extra_prefs"],
+            "test_type": test_type,
+            "debug_info": kwargs["debug_info"],
+            "symbols_path": kwargs["symbols_path"],
+            "stackwalk_binary": kwargs["stackwalk_binary"],
+            "certutil_binary": kwargs["certutil_binary"],
+            "ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
+            "e10s": kwargs["gecko_e10s"],
+            "stackfix_dir": kwargs["stackfix_dir"],
+            "binary_args": kwargs["binary_args"],
+            "timeout_multiplier": get_timeout_multiplier(test_type,
+                                                         run_info_data,
+                                                         **kwargs),
+            "leak_check": kwargs["leak_check"],
+            "stylo_threads": kwargs["stylo_threads"]}
+
+
+def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                    **kwargs):
+    executor_kwargs = base_executor_kwargs(test_type, server_config,
+                                           cache_manager, **kwargs)
+    executor_kwargs["close_after_done"] = test_type != "reftest"
+    executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
+                                                                   run_info_data,
+                                                                   **kwargs)
+    if test_type == "reftest":
+        executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
+        executor_kwargs["reftest_screenshot"] = kwargs["reftest_screenshot"]
+    if test_type == "wdspec":
+        executor_kwargs["binary"] = kwargs["binary"]
+        executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
+        executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
+        fxOptions = {}
+        if kwargs["binary"]:
+            fxOptions["binary"] = kwargs["binary"]
+        if kwargs["binary_args"]:
+            fxOptions["args"] = kwargs["binary_args"]
+        fxOptions["prefs"] = {
+            "network.dns.localDomains": ",".join(hostnames)
+        }
+        capabilities = {"moz:firefoxOptions": fxOptions}
+        executor_kwargs["capabilities"] = capabilities
+    return executor_kwargs
+
+
+def env_extras(**kwargs):
+    return []
+
+
+def env_options():
+    return {"host": "127.0.0.1",
+            "external_host": "web-platform.test",
+            "bind_hostname": "false",
+            "certificate_domain": "web-platform.test",
+            "supports_debugger": True}
+
+
+def run_info_extras(**kwargs):
+    return {"e10s": kwargs["gecko_e10s"],
+            "headless": "MOZ_HEADLESS" in os.environ}
+
+
+def update_properties():
+    return (["debug", "stylo", "e10s", "os", "version", "processor", "bits"],
+            {"debug", "e10s", "stylo"})
+
+
+class FirefoxBrowser(Browser):
+    used_ports = set()
+    init_timeout = 60
+    shutdown_timeout = 60
+
+    def __init__(self, logger, binary, prefs_root, test_type, extra_prefs=None, debug_info=None,
+                 symbols_path=None, stackwalk_binary=None, certutil_binary=None,
+                 ca_certificate_path=None, e10s=False, stackfix_dir=None,
+                 binary_args=None, timeout_multiplier=None, leak_check=False, stylo_threads=1):
+        Browser.__init__(self, logger)
+        self.binary = binary
+        self.prefs_root = prefs_root
+        self.test_type = test_type
+        self.extra_prefs = extra_prefs
+        self.marionette_port = None
+        self.runner = None
+        self.debug_info = debug_info
+        self.profile = None
+        self.symbols_path = symbols_path
+        self.stackwalk_binary = stackwalk_binary
+        self.ca_certificate_path = ca_certificate_path
+        self.certutil_binary = certutil_binary
+        self.e10s = e10s
+        self.binary_args = binary_args
+        if self.symbols_path and stackfix_dir:
+            self.stack_fixer = get_stack_fixer_function(stackfix_dir,
+                                                        self.symbols_path)
+        else:
+            self.stack_fixer = None
+
+        if timeout_multiplier:
+            self.init_timeout = self.init_timeout * timeout_multiplier
+
+        self.leak_report_file = None
+        self.leak_check = leak_check
+        self.stylo_threads = stylo_threads
+
+    def settings(self, test):
+        return {"check_leaks": self.leak_check and not test.leaks}
+
+    def start(self, **kwargs):
+        if self.marionette_port is None:
+            self.marionette_port = get_free_port(2828, exclude=self.used_ports)
+            self.used_ports.add(self.marionette_port)
+
+        env = os.environ.copy()
+        env["MOZ_CRASHREPORTER"] = "1"
+        env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
+        env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+        env["STYLO_THREADS"] = str(self.stylo_threads)
+
+        locations = ServerLocations(filename=os.path.join(here, "server-locations.txt"))
+
+        preferences = self.load_prefs()
+
+        self.profile = FirefoxProfile(locations=locations,
+                                      preferences=preferences)
+        self.profile.set_preferences({"marionette.port": self.marionette_port,
+                                      "dom.disable_open_during_load": False,
+                                      "network.dns.localDomains": ",".join(hostnames),
+                                      "network.proxy.type": 0,
+                                      "places.history.enabled": False,
+                                      "dom.send_after_paint_to_content": True})
+        if self.e10s:
+            self.profile.set_preferences({"browser.tabs.remote.autostart": True})
+
+        if self.test_type == "reftest":
+            self.profile.set_preferences({"layout.interruptible-reflow.enabled": False})
+
+        if self.leak_check and kwargs.get("check_leaks", True):
+            self.leak_report_file = os.path.join(self.profile.profile, "runtests_leaks.log")
+            if os.path.exists(self.leak_report_file):
+                os.remove(self.leak_report_file)
+            env["XPCOM_MEM_BLOAT_LOG"] = self.leak_report_file
+        else:
+            self.leak_report_file = None
+
+        # Bug 1262954: winxp + e10s, disable hwaccel
+        if (self.e10s and platform.system() in ("Windows", "Microsoft") and
+            '5.1' in platform.version()):
+            self.profile.set_preferences({"layers.acceleration.disabled": True})
+
+        if self.ca_certificate_path is not None:
+            self.setup_ssl()
+
+        # Parenthesize so the marionette args are always appended, even when
+        # binary_args is set (the bare conditional expression would swallow them).
+        debug_args, cmd = browser_command(self.binary,
+                                          (self.binary_args if self.binary_args else []) +
+                                          [cmd_arg("marionette"), "about:blank"],
+                                          self.debug_info)
+
+        self.runner = FirefoxRunner(profile=self.profile,
+                                    binary=cmd[0],
+                                    cmdargs=cmd[1:],
+                                    env=env,
+                                    process_class=ProcessHandler,
+                                    process_args={"processOutputLine": [self.on_output]})
+
+        self.logger.debug("Starting Firefox")
+
+        self.runner.start(debug_args=debug_args, interactive=self.debug_info and self.debug_info.interactive)
+        self.logger.debug("Firefox Started")
+
+    def load_prefs(self):
+        prefs = Preferences()
+
+        prefs_path = os.path.join(self.prefs_root, "prefs_general.js")
+        if os.path.exists(prefs_path):
+            prefs.add(Preferences.read_prefs(prefs_path))
+        else:
+            self.logger.warning("Failed to find base prefs file in %s" % prefs_path)
+
+        # Add any custom preferences
+        prefs.add(self.extra_prefs, cast=True)
+
+        return prefs()
+
+    def stop(self, force=False):
+        if self.runner is not None and self.runner.is_running():
+            try:
+                # For Firefox we assume that stopping the runner prompts the
+                # browser to shut down. This allows the leak log to be written
+                for clean, stop_f in [(True, lambda: self.runner.wait(self.shutdown_timeout)),
+                                      (False, lambda: self.runner.stop(signal.SIGTERM)),
+                                      (False, lambda: self.runner.stop(signal.SIGKILL))]:
+                    if not force or not clean:
+                        retcode = stop_f()
+                        if retcode is not None:
+                            self.logger.info("Browser exited with return code %s" % retcode)
+                            break
+            except OSError:
+                # This can happen on Windows if the process is already dead
+                pass
+        self.logger.debug("stopped")
+
+    def process_leaks(self):
+        self.logger.debug("PROCESS LEAKS %s" % self.leak_report_file)
+        if self.leak_report_file is None:
+            return
+        mozleak.process_leak_log(
+            self.leak_report_file,
+            leak_thresholds={
+                "default": 0,
+                "tab": 10000,  # See dependencies of bug 1051230.
+                # GMP rarely gets a log, but when it does, it leaks a little.
+                "geckomediaplugin": 20000,
+            },
+            ignore_missing_leaks=["geckomediaplugin"],
+            log=self.logger,
+            stack_fixer=self.stack_fixer
+        )
+
+    def pid(self):
+        if self.runner.process_handler is None:
+            return None
+
+        try:
+            return self.runner.process_handler.pid
+        except AttributeError:
+            return None
+
+    def on_output(self, line):
+        """Write a line of output from the firefox process to the log"""
+        data = line.decode("utf8", "replace")
+        if self.stack_fixer:
+            data = self.stack_fixer(data)
+        self.logger.process_output(self.pid(),
+                                   data,
+                                   command=" ".join(self.runner.command))
+
+    def is_alive(self):
+        if self.runner:
+            return self.runner.is_running()
+        return False
+
+    def cleanup(self):
+        self.stop()
+        self.process_leaks()
+
+    def executor_browser(self):
+        assert self.marionette_port is not None
+        return ExecutorBrowser, {"marionette_port": self.marionette_port}
+
+    def check_for_crashes(self):
+        dump_dir = os.path.join(self.profile.profile, "minidumps")
+
+        return bool(mozcrash.check_for_crashes(dump_dir,
+                                               symbols_path=self.symbols_path,
+                                               stackwalk_binary=self.stackwalk_binary,
+                                               quiet=True))
+
+    def log_crash(self, process, test):
+        dump_dir = os.path.join(self.profile.profile, "minidumps")
+
+        mozcrash.log_crashes(self.logger,
+                             dump_dir,
+                             symbols_path=self.symbols_path,
+                             stackwalk_binary=self.stackwalk_binary,
+                             process=process,
+                             test=test)
+
+    def setup_ssl(self):
+        """Create a certificate database to use in the test profile. This is configured
+        to trust the CA Certificate that has signed the web-platform.test server
+        certificate."""
+
+        self.logger.info("Setting up ssl")
+
+        # Make sure the certutil libraries from the source tree are loaded when using a
+        # local copy of certutil
+        # TODO: Maybe only set this if certutil won't launch?
+        env = os.environ.copy()
+        certutil_dir = os.path.dirname(self.binary)
+        if mozinfo.isMac:
+            env_var = "DYLD_LIBRARY_PATH"
+        elif mozinfo.isUnix:
+            env_var = "LD_LIBRARY_PATH"
+        else:
+            env_var = "PATH"
+
+        env[env_var] = (os.path.pathsep.join([certutil_dir, env[env_var]])
+                        if env_var in env else certutil_dir).encode(
+                                sys.getfilesystemencoding() or 'utf-8', 'replace')
+
+        def certutil(*args):
+            cmd = [self.certutil_binary] + list(args)
+            self.logger.process_output("certutil",
+                                       subprocess.check_output(cmd,
+                                                               env=env,
+                                                               stderr=subprocess.STDOUT),
+                                       " ".join(cmd))
+
+        pw_path = os.path.join(self.profile.profile, ".crtdbpw")
+        with open(pw_path, "w") as f:
+            # Use empty password for certificate db
+            f.write("\n")
+
+        cert_db_path = self.profile.profile
+
+        # Create a new certificate db
+        certutil("-N", "-d", cert_db_path, "-f", pw_path)
+
+        # Add the CA certificate to the database and mark as trusted to issue server certs
+        certutil("-A", "-d", cert_db_path, "-f", pw_path, "-t", "CT,,",
+                 "-n", "web-platform-tests", "-i", self.ca_certificate_path)
+
+        # List all certs in the database
+        certutil("-L", "-d", cert_db_path)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce.py
new file mode 100644
index 0000000..a2f29a4
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce.py
@@ -0,0 +1,208 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import glob
+import os
+import shutil
+import subprocess
+import tarfile
+import tempfile
+import time
+from cStringIO import StringIO as CStringIO
+
+import requests
+
+from .base import Browser, ExecutorBrowser, require_arg
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,
+                                          SeleniumRefTestExecutor)
+
+here = os.path.split(__file__)[0]
+
+
+__wptrunner__ = {"product": "sauce",
+                 "check_args": "check_args",
+                 "browser": "SauceBrowser",
+                 "executor": {"testharness": "SeleniumTestharnessExecutor",
+                              "reftest": "SeleniumRefTestExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_extras": "env_extras",
+                 "env_options": "env_options"}
+
+
+def get_capabilities(**kwargs):
+    browser_name = kwargs["sauce_browser"]
+    platform = kwargs["sauce_platform"]
+    version = kwargs["sauce_version"]
+    build = kwargs["sauce_build"]
+    tags = kwargs["sauce_tags"]
+    tunnel_id = kwargs["sauce_tunnel_id"]
+    prerun_script = {
+        "MicrosoftEdge": {
+            "executable": "sauce-storage:edge-prerun.bat",
+            "background": False,
+        },
+        "safari": {
+            "executable": "sauce-storage:safari-prerun.sh",
+            "background": False,
+        }
+    }
+    capabilities = {
+        "browserName": browser_name,
+        "build": build,
+        "disablePopupHandler": True,
+        "name": "%s %s on %s" % (browser_name, version, platform),
+        "platform": platform,
+        "public": "public",
+        "selenium-version": "3.3.1",
+        "tags": tags,
+        "tunnel-identifier": tunnel_id,
+        "version": version,
+        "prerun": prerun_script.get(browser_name)
+    }
+
+    if browser_name == 'MicrosoftEdge':
+        capabilities['selenium-version'] = '2.4.8'
+
+    return capabilities
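+
+# Example (illustrative): sauce_browser="firefox", sauce_version="60" and
+# sauce_platform="Windows 10" yield the session name "firefox 60 on
+# Windows 10" in the capabilities above.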
+
+
+def get_sauce_config(**kwargs):
+    browser_name = kwargs["sauce_browser"]
+    sauce_user = kwargs["sauce_user"]
+    sauce_key = kwargs["sauce_key"]
+
+    hub_url = "%s:%s@localhost:4445" % (sauce_user, sauce_key)
+    data = {
+        "url": "http://%s/wd/hub" % hub_url,
+        "browserName": browser_name,
+        "capabilities": get_capabilities(**kwargs)
+    }
+
+    return data
+
+
+def check_args(**kwargs):
+    require_arg(kwargs, "sauce_browser")
+    require_arg(kwargs, "sauce_platform")
+    require_arg(kwargs, "sauce_version")
+    require_arg(kwargs, "sauce_user")
+    require_arg(kwargs, "sauce_key")
+
+
+def browser_kwargs(test_type, run_info_data, **kwargs):
+    sauce_config = get_sauce_config(**kwargs)
+
+    return {"sauce_config": sauce_config}
+
+
+def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                    **kwargs):
+    executor_kwargs = base_executor_kwargs(test_type, server_config,
+                                           cache_manager, **kwargs)
+
+    executor_kwargs["capabilities"] = get_capabilities(**kwargs)
+
+    return executor_kwargs
+
+
+def env_extras(**kwargs):
+    return [SauceConnect(**kwargs)]
+
+
+def env_options():
+    return {"host": "web-platform.test",
+            "bind_hostname": "true",
+            "supports_debugger": False}
+
+
+def get_tar(url, dest):
+    resp = requests.get(url, stream=True)
+    resp.raise_for_status()
+    with tarfile.open(fileobj=CStringIO(resp.raw.read())) as f:
+        f.extractall(path=dest)
+
+
+class SauceConnect(object):
+
+    def __init__(self, **kwargs):
+        self.sauce_user = kwargs["sauce_user"]
+        self.sauce_key = kwargs["sauce_key"]
+        self.sauce_tunnel_id = kwargs["sauce_tunnel_id"]
+        self.sauce_connect_binary = kwargs.get("sauce_connect_binary")
+        self.sc_process = None
+        self.temp_dir = None
+
+    def __enter__(self, options):
+        if not self.sauce_connect_binary:
+            self.temp_dir = tempfile.mkdtemp()
+            get_tar("https://saucelabs.com/downloads/sc-latest-linux.tar.gz", self.temp_dir)
+            self.sauce_connect_binary = glob.glob(os.path.join(self.temp_dir, "sc-*-linux/bin/sc"))[0]
+
+        self.upload_prerun_exec('edge-prerun.bat')
+        self.upload_prerun_exec('safari-prerun.sh')
+
+        self.sc_process = subprocess.Popen([
+            self.sauce_connect_binary,
+            "--user=%s" % self.sauce_user,
+            "--api-key=%s" % self.sauce_key,
+            "--no-remove-colliding-tunnels",
+            "--tunnel-identifier=%s" % self.sauce_tunnel_id,
+            "--readyfile=./sauce_is_ready",
+            "--tunnel-domains",
+            "web-platform.test",
+            "*.web-platform.test"
+        ])
+        # poll() returns None while the process is still running, so stop
+        # waiting as soon as the ready-file appears or the process exits.
+        while not os.path.exists('./sauce_is_ready') and self.sc_process.poll() is None:
+            time.sleep(5)
+
+        if self.sc_process.returncode is not None and self.sc_process.returncode > 0:
+            raise SauceException("Unable to start Sauce Connect Proxy. Process exited with code %s" %
+                                 self.sc_process.returncode)
+
+    def __exit__(self, *args):
+        self.sc_process.terminate()
+        if self.temp_dir and os.path.exists(self.temp_dir):
+            try:
+                shutil.rmtree(self.temp_dir)
+            except OSError:
+                pass
+
+    def upload_prerun_exec(self, file_name):
+        auth = (self.sauce_user, self.sauce_key)
+        url = "https://saucelabs.com/rest/v1/storage/%s/%s?overwrite=true" % (self.sauce_user, file_name)
+
+        with open(os.path.join(here, 'sauce_setup', file_name), 'rb') as f:
+            requests.post(url, data=f, auth=auth)
+
+
+class SauceException(Exception):
+    pass
+
+
+class SauceBrowser(Browser):
+    init_timeout = 300
+
+    def __init__(self, logger, sauce_config):
+        Browser.__init__(self, logger)
+        self.sauce_config = sauce_config
+
+    def start(self, **kwargs):
+        pass
+
+    def stop(self, force=False):
+        pass
+
+    def pid(self):
+        return None
+
+    def is_alive(self):
+        # TODO: Should this check something about the connection?
+        return True
+
+    def cleanup(self):
+        pass
+
+    def executor_browser(self):
+        return ExecutorBrowser, {"webdriver_url": self.sauce_config["url"]}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce_setup/edge-prerun.bat b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce_setup/edge-prerun.bat
new file mode 100644
index 0000000..4554894
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce_setup/edge-prerun.bat
@@ -0,0 +1,2 @@
+@echo off
+reg add "HKCU\Software\Classes\Local Settings\Software\Microsoft\Windows\CurrentVersion\AppContainer\Storage\microsoft.microsoftedge_8wekyb3d8bbwe\MicrosoftEdge\New Windows" /v "PopupMgr" /t REG_SZ /d no
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh
new file mode 100644
index 0000000..85c72e6
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaScriptCanOpenWindowsAutomatically -bool true
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/server-locations.txt b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/server-locations.txt
new file mode 100644
index 0000000..5dcaf4b
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/server-locations.txt
@@ -0,0 +1,31 @@
+http://localhost:8000    primary
+
+http://web-platform.test:8000
+http://www.web-platform.test:8000
+http://www1.web-platform.test:8000
+http://www2.web-platform.test:8000
+http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8000
+http://xn--lve-6lad.web-platform.test:8000
+
+http://web-platform.test:8001
+http://www.web-platform.test:8001
+http://www1.web-platform.test:8001
+http://www2.web-platform.test:8001
+http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8001
+http://xn--lve-6lad.web-platform.test:8001
+
+https://web-platform.test:8443
+https://www.web-platform.test:8443
+https://www1.web-platform.test:8443
+https://www2.web-platform.test:8443
+https://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8443
+https://xn--lve-6lad.web-platform.test:8443
+
+# These are actually ws servers, but until mozprofile is
+# fixed we have to pretend that they are http servers
+http://web-platform.test:8888
+http://www.web-platform.test:8888
+http://www1.web-platform.test:8888
+http://www2.web-platform.test:8888
+http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8888
+http://xn--lve-6lad.web-platform.test:8888
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/servo.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/servo.py
new file mode 100644
index 0000000..89d69ab
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/servo.py
@@ -0,0 +1,81 @@
+import os
+
+from .base import NullBrowser, ExecutorBrowser, require_arg
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor
+
+here = os.path.join(os.path.split(__file__)[0])
+
+__wptrunner__ = {
+    "product": "servo",
+    "check_args": "check_args",
+    "browser": "ServoBrowser",
+    "executor": {
+        "testharness": "ServoTestharnessExecutor",
+        "reftest": "ServoRefTestExecutor",
+        "wdspec": "ServoWdspecExecutor",
+    },
+    "browser_kwargs": "browser_kwargs",
+    "executor_kwargs": "executor_kwargs",
+    "env_extras": "env_extras",
+    "env_options": "env_options",
+    "update_properties": "update_properties",
+}
+
+
+def check_args(**kwargs):
+    require_arg(kwargs, "binary")
+
+
+def browser_kwargs(test_type, run_info_data, **kwargs):
+    return {
+        "binary": kwargs["binary"],
+        "debug_info": kwargs["debug_info"],
+        "binary_args": kwargs["binary_args"],
+        "user_stylesheets": kwargs.get("user_stylesheets"),
+        "ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
+    }
+
+
+def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                    **kwargs):
+    rv = base_executor_kwargs(test_type, server_config,
+                              cache_manager, **kwargs)
+    rv["pause_after_test"] = kwargs["pause_after_test"]
+    return rv
+
+
+def env_extras(**kwargs):
+    return []
+
+
+def env_options():
+    return {"host": "127.0.0.1",
+            "external_host": "web-platform.test",
+            "bind_hostname": "true",
+            "testharnessreport": "testharnessreport-servo.js",
+            "supports_debugger": True}
+
+
+def update_properties():
+    return ["debug", "os", "version", "processor", "bits"], None
+
+
+class ServoBrowser(NullBrowser):
+    def __init__(self, logger, binary, debug_info=None, binary_args=None,
+                 user_stylesheets=None, ca_certificate_path=None):
+        NullBrowser.__init__(self, logger)
+        self.binary = binary
+        self.debug_info = debug_info
+        self.binary_args = binary_args or []
+        self.user_stylesheets = user_stylesheets or []
+        self.ca_certificate_path = ca_certificate_path
+
+    def executor_browser(self):
+        return ExecutorBrowser, {
+            "binary": self.binary,
+            "debug_info": self.debug_info,
+            "binary_args": self.binary_args,
+            "user_stylesheets": self.user_stylesheets,
+            "ca_certificate_path": self.ca_certificate_path,
+        }
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/servodriver.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/servodriver.py
new file mode 100644
index 0000000..c251de8
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/browsers/servodriver.py
@@ -0,0 +1,165 @@
+import os
+import subprocess
+import tempfile
+
+from mozprocess import ProcessHandler
+
+from .base import Browser, require_arg, get_free_port, browser_command, ExecutorBrowser
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor,
+                                             ServoWebDriverRefTestExecutor)
+
+here = os.path.join(os.path.split(__file__)[0])
+
+__wptrunner__ = {
+    "product": "servodriver",
+    "check_args": "check_args",
+    "browser": "ServoWebDriverBrowser",
+    "executor": {
+        "testharness": "ServoWebDriverTestharnessExecutor",
+        "reftest": "ServoWebDriverRefTestExecutor",
+    },
+    "browser_kwargs": "browser_kwargs",
+    "executor_kwargs": "executor_kwargs",
+    "env_extras": "env_extras",
+    "env_options": "env_options",
+    "update_properties": "update_properties",
+}
+
+hosts_text = """127.0.0.1 web-platform.test
+127.0.0.1 www.web-platform.test
+127.0.0.1 www1.web-platform.test
+127.0.0.1 www2.web-platform.test
+127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
+127.0.0.1 xn--lve-6lad.web-platform.test
+"""
+
+
+def check_args(**kwargs):
+    require_arg(kwargs, "binary")
+
+
+def browser_kwargs(test_type, run_info_data, **kwargs):
+    return {
+        "binary": kwargs["binary"],
+        "debug_info": kwargs["debug_info"],
+        "user_stylesheets": kwargs.get("user_stylesheets"),
+    }
+
+
+def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs):
+    rv = base_executor_kwargs(test_type, server_config,
+                              cache_manager, **kwargs)
+    return rv
+
+
+def env_extras(**kwargs):
+    return []
+
+
+def env_options():
+    return {"host": "127.0.0.1",
+            "external_host": "web-platform.test",
+            "bind_hostname": "true",
+            "testharnessreport": "testharnessreport-servodriver.js",
+            "supports_debugger": True}
+
+
+def update_properties():
+    return ["debug", "os", "version", "processor", "bits"], None
+
+
+def make_hosts_file():
+    hosts_fd, hosts_path = tempfile.mkstemp()
+    with os.fdopen(hosts_fd, "w") as f:
+        f.write(hosts_text)
+    return hosts_path
+
+
+class ServoWebDriverBrowser(Browser):
+    used_ports = set()
+
+    def __init__(self, logger, binary, debug_info=None, webdriver_host="127.0.0.1",
+                 user_stylesheets=None):
+        Browser.__init__(self, logger)
+        self.binary = binary
+        self.webdriver_host = webdriver_host
+        self.webdriver_port = None
+        self.proc = None
+        self.debug_info = debug_info
+        self.hosts_path = make_hosts_file()
+        self.command = None
+        self.user_stylesheets = user_stylesheets if user_stylesheets else []
+
+    def start(self, **kwargs):
+        self.webdriver_port = get_free_port(4444, exclude=self.used_ports)
+        self.used_ports.add(self.webdriver_port)
+
+        env = os.environ.copy()
+        env["HOST_FILE"] = self.hosts_path
+        env["RUST_BACKTRACE"] = "1"
+
+        debug_args, command = browser_command(
+            self.binary,
+            [
+                "--hard-fail",
+                "--webdriver", str(self.webdriver_port),
+                "about:blank",
+            ],
+            self.debug_info
+        )
+
+        for stylesheet in self.user_stylesheets:
+            command += ["--user-stylesheet", stylesheet]
+
+        self.command = debug_args + command
+
+        if not self.debug_info or not self.debug_info.interactive:
+            self.proc = ProcessHandler(self.command,
+                                       processOutputLine=[self.on_output],
+                                       env=env,
+                                       storeOutput=False)
+            self.proc.run()
+        else:
+            self.proc = subprocess.Popen(self.command, env=env)
+
+        self.logger.debug("Servo Started")
+
+    def stop(self, force=False):
+        self.logger.debug("Stopping browser")
+        if self.proc is not None:
+            try:
+                self.proc.kill()
+            except OSError:
+                # This can happen on Windows if the process is already dead
+                pass
+
+    def pid(self):
+        if self.proc is None:
+            return None
+
+        try:
+            return self.proc.pid
+        except AttributeError:
+            return None
+
+    def on_output(self, line):
+        """Write a line of output from the process to the log"""
+        self.logger.process_output(self.pid(),
+                                   line.decode("utf8", "replace"),
+                                   command=" ".join(self.command))
+
+    def is_alive(self):
+        if self.proc is not None:
+            # ProcessHandler and Popen both expose poll(); None means running.
+            return self.proc.poll() is None
+        return False
+
+    def cleanup(self):
+        self.stop()
+
+    def executor_browser(self):
+        assert self.webdriver_port is not None
+        return ExecutorBrowser, {"webdriver_host": self.webdriver_host,
+                                 "webdriver_port": self.webdriver_port}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/config.json b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/config.json
new file mode 100644
index 0000000..d146424
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/config.json
@@ -0,0 +1,7 @@
+{"host": "%(host)s",
+ "ports":{"http":[8000, 8001],
+          "https":[8443],
+          "ws":[8888]},
+ "check_subdomains":false,
+ "bind_hostname":%(bind_hostname)s,
+ "ssl":{}}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/config.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/config.py
new file mode 100644
index 0000000..5bd3f46
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/config.py
@@ -0,0 +1,60 @@
+import ConfigParser
+import os
+import sys
+from collections import OrderedDict
+
+here = os.path.split(__file__)[0]
+
+class ConfigDict(dict):
+    def __init__(self, base_path, *args, **kwargs):
+        self.base_path = base_path
+        dict.__init__(self, *args, **kwargs)
+
+    def get_path(self, key, default=None):
+        if key not in self:
+            return default
+        path = self[key]
+        path = os.path.expanduser(path)
+        return os.path.abspath(os.path.join(self.base_path, path))
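+    # Illustrative usage (paths assumed for the example):
+    #
+    #   >>> ConfigDict("/home/user/wpt", tests="../tests").get_path("tests")
+    #   '/home/user/tests'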
+
+def read(config_path):
+    config_path = os.path.abspath(config_path)
+    config_root = os.path.split(config_path)[0]
+    parser = ConfigParser.SafeConfigParser()
+    success = parser.read(config_path)
+    assert config_path in success, success
+
+    subns = {"pwd": os.path.abspath(os.path.curdir)}
+
+    rv = OrderedDict()
+    for section in parser.sections():
+        rv[section] = ConfigDict(config_root)
+        for key in parser.options(section):
+            rv[section][key] = parser.get(section, key, False, subns)
+
+    return rv
+
+def path(argv=None):
+    if argv is None:
+        argv = []
+    path = None
+
+    for i, arg in enumerate(argv):
+        if arg == "--config":
+            if i + 1 < len(argv):
+                path = argv[i + 1]
+        elif arg.startswith("--config="):
+            path = arg.split("=", 1)[1]
+        if path is not None:
+            break
+
+    if path is None:
+        if os.path.exists("wptrunner.ini"):
+            path = os.path.abspath("wptrunner.ini")
+        else:
+            path = os.path.join(here, "..", "wptrunner.default.ini")
+
+    return os.path.abspath(path)
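+# Illustrative resolution (paths assumed): an explicit flag wins,
+#
+#   >>> path(["--config=/tmp/custom.ini"])
+#   '/tmp/custom.ini'
+#
+# otherwise ./wptrunner.ini is used when present, falling back to the
+# bundled wptrunner.default.ini.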
+
+def load():
+    return read(path(sys.argv))
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/environment.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/environment.py
new file mode 100644
index 0000000..167da95
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/environment.py
@@ -0,0 +1,225 @@
+import json
+import os
+import multiprocessing
+import signal
+import socket
+import sys
+import time
+
+from mozlog import get_default_logger, handlers, proxy
+
+from wptlogging import LogLevelRewriter
+
+here = os.path.split(__file__)[0]
+
+serve = None
+sslutils = None
+
+
+hostnames = ["web-platform.test",
+             "www.web-platform.test",
+             "www1.web-platform.test",
+             "www2.web-platform.test",
+             "xn--n8j6ds53lwwkrqhv28a.web-platform.test",
+             "xn--lve-6lad.web-platform.test"]
+
+
+def do_delayed_imports(logger, test_paths):
+    global serve, sslutils
+
+    serve_root = serve_path(test_paths)
+    sys.path.insert(0, serve_root)
+
+    failed = []
+
+    try:
+        from tools.serve import serve
+    except ImportError:
+        try:
+            from wpt_tools.serve import serve
+        except ImportError:
+            failed.append("serve")
+
+    try:
+        import sslutils
+    except ImportError:
+        failed.append("sslutils")
+
+    if failed:
+        logger.critical(
+            "Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
+            (", ".join(failed), serve_root))
+        sys.exit(1)
+
+
+def serve_path(test_paths):
+    return test_paths["/"]["tests_path"]
+
+
+def get_ssl_kwargs(**kwargs):
+    if kwargs["ssl_type"] == "openssl":
+        args = {"openssl_binary": kwargs["openssl_binary"]}
+    elif kwargs["ssl_type"] == "pregenerated":
+        args = {"host_key_path": kwargs["host_key_path"],
+                "host_cert_path": kwargs["host_cert_path"],
+                 "ca_cert_path": kwargs["ca_cert_path"]}
+    else:
+        args = {}
+    return args
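+# Illustrative selection (values assumed): ssl_type "openssl" forwards
+# only the binary path,
+#
+#   >>> get_ssl_kwargs(ssl_type="openssl", openssl_binary="/usr/bin/openssl")
+#   {'openssl_binary': '/usr/bin/openssl'}
+#
+# while unrecognised types fall through to an empty dict.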
+
+
+def ssl_env(logger, **kwargs):
+    ssl_env_cls = sslutils.environments[kwargs["ssl_type"]]
+    return ssl_env_cls(logger, **get_ssl_kwargs(**kwargs))
+
+
+class TestEnvironmentError(Exception):
+    pass
+
+
+class TestEnvironment(object):
+    def __init__(self, test_paths, ssl_env, pause_after_test, debug_info, options, env_extras):
+        """Context manager that owns the test environment i.e. the http and
+        websockets servers"""
+        self.test_paths = test_paths
+        self.ssl_env = ssl_env
+        self.server = None
+        self.config = None
+        self.external_config = None
+        self.pause_after_test = pause_after_test
+        self.debug_info = debug_info
+        self.options = options if options is not None else {}
+        self.test_server_port = self.options.pop("test_server_port", True)
+
+        self.cache_manager = multiprocessing.Manager()
+        self.stash = serve.stash.StashServer()
+        self.env_extras = env_extras
+
+
+    def __enter__(self):
+        self.stash.__enter__()
+        self.ssl_env.__enter__()
+        self.cache_manager.__enter__()
+        for cm in self.env_extras:
+            cm.__enter__(self.options)
+        self.setup_server_logging()
+        self.config = self.load_config()
+        serve.set_computed_defaults(self.config)
+        self.external_config, self.servers = serve.start(self.config, self.ssl_env,
+                                                         self.get_routes())
+        if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
+            self.ignore_interrupts()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.process_interrupts()
+
+        for scheme, servers in self.servers.iteritems():
+            for port, server in servers:
+                server.kill()
+        for cm in self.env_extras:
+            cm.__exit__()
+        self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
+        self.ssl_env.__exit__(exc_type, exc_val, exc_tb)
+        self.stash.__exit__()
+
+    def ignore_interrupts(self):
+        signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+    def process_interrupts(self):
+        signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+    def load_config(self):
+        default_config_path = os.path.join(serve_path(self.test_paths), "config.default.json")
+        local_config_path = os.path.join(here, "config.json")
+
+        with open(default_config_path) as f:
+            default_config = json.load(f)
+
+        with open(local_config_path) as f:
+            data = f.read()
+            local_config = json.loads(data % self.options)
+
+        #TODO: allow non-default configuration for ssl
+
+        local_config["external_host"] = self.options.get("external_host", None)
+        local_config["ssl"]["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
+
+        config = serve.merge_json(default_config, local_config)
+        config["doc_root"] = serve_path(self.test_paths)
+
+        if not self.ssl_env.ssl_enabled:
+            config["ports"]["https"] = [None]
+
+        host = self.options.get("certificate_domain", config["host"])
+        hosts = [host]
+        hosts.extend("%s.%s" % (item[0], host) for item in serve.get_subdomains(host).values())
+        key_file, certificate = self.ssl_env.host_cert_path(hosts)
+
+        config["key_file"] = key_file
+        config["certificate"] = certificate
+
+        return config
+
+    def setup_server_logging(self):
+        server_logger = get_default_logger(component="wptserve")
+        assert server_logger is not None
+        log_filter = handlers.LogLevelFilter(lambda x: x, "info")
+        # Downgrade errors to warnings for the server
+        log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
+        server_logger.component_filter = log_filter
+
+        server_logger = proxy.QueuedProxyLogger(server_logger)
+
+        try:
+            # Set as the default logger for wptserve
+            serve.set_logger(server_logger)
+            serve.logger = server_logger
+        except Exception:
+            # This happens if logging has already been set up for wptserve
+            pass
+
+    def get_routes(self):
+        route_builder = serve.RoutesBuilder()
+
+        for path, format_args, content_type, route in [
+                ("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
+                (self.options.get("testharnessreport", "testharnessreport.js"),
+                 {"output": self.pause_after_test}, "text/javascript",
+                 "/resources/testharnessreport.js")]:
+            path = os.path.normpath(os.path.join(here, path))
+            route_builder.add_static(path, format_args, content_type, route)
+
+        for url_base, paths in self.test_paths.iteritems():
+            if url_base == "/":
+                continue
+            route_builder.add_mount_point(url_base, paths["tests_path"])
+
+        if "/" not in self.test_paths:
+            del route_builder.mountpoint_routes["/"]
+
+        return route_builder.get_routes()
+
+    def ensure_started(self):
+        # Pause for a while to ensure that the server has a chance to start
+        for _ in xrange(20):
+            failed = self.test_servers()
+            if not failed:
+                return
+            time.sleep(0.5)
+        raise EnvironmentError("Servers failed to start (scheme:port): %s" %
+                               ", ".join("%s:%s" % item for item in failed))
+
+    def test_servers(self):
+        failed = []
+        for scheme, servers in self.servers.iteritems():
+            for port, server in servers:
+                if self.test_server_port:
+                    s = socket.socket()
+                    try:
+                        s.connect((self.config["host"], port))
+                    except socket.error:
+                        failed.append((scheme, port))
+                    finally:
+                        s.close()
+
+                if not server.is_alive():
+                    failed.append((scheme, port))
+
+        return failed
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/__init__.py
new file mode 100644
index 0000000..24761b8
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/__init__.py
@@ -0,0 +1,4 @@
+from base import (executor_kwargs,
+                  testharness_result_converter,
+                  reftest_result_converter,
+                  TestExecutor)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/base.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/base.py
new file mode 100644
index 0000000..c92493f
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/base.py
@@ -0,0 +1,331 @@
+import hashlib
+import json
+import os
+import traceback
+import urlparse
+from abc import ABCMeta, abstractmethod
+
+from ..testrunner import Stop
+
+here = os.path.split(__file__)[0]
+
+
+def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
+    timeout_multiplier = kwargs["timeout_multiplier"]
+    if timeout_multiplier is None:
+        timeout_multiplier = 1
+
+    executor_kwargs = {"server_config": server_config,
+                       "timeout_multiplier": timeout_multiplier,
+                       "debug_info": kwargs["debug_info"]}
+
+    if test_type == "reftest":
+        executor_kwargs["screenshot_cache"] = cache_manager.dict()
+
+    return executor_kwargs
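+# For example (call shape assumed), a reftest run additionally receives a
+# multiprocessing-backed dict that is shared across executors:
+#
+#   executor_kwargs("reftest", server_config, cache_manager,
+#                   timeout_multiplier=None, debug_info=None)
+#   # -> {"server_config": ..., "timeout_multiplier": 1,
+#   #     "debug_info": None, "screenshot_cache": <shared dict>}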
+
+
+def strip_server(url):
+    """Remove the scheme and netloc from a url, leaving only the path and any query
+    or fragment.
+
+    url - the url to strip
+
+    e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
+
+    url_parts = list(urlparse.urlsplit(url))
+    url_parts[0] = ""
+    url_parts[1] = ""
+    return urlparse.urlunsplit(url_parts)
+
+
+class TestharnessResultConverter(object):
+    harness_codes = {0: "OK",
+                     1: "ERROR",
+                     2: "TIMEOUT"}
+
+    test_codes = {0: "PASS",
+                  1: "FAIL",
+                  2: "TIMEOUT",
+                  3: "NOTRUN"}
+
+    def __call__(self, test, result):
+        """Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
+        result_url, status, message, stack, subtest_results = result
+        assert result_url == test.url, ("Got results from %s, expected %s" %
+                                        (result_url, test.url))
+        harness_result = test.result_cls(self.harness_codes[status], message)
+        return (harness_result,
+                [test.subtest_result_cls(name, self.test_codes[status], message, stack)
+                 for name, status, message, stack in subtest_results])
+
+
+testharness_result_converter = TestharnessResultConverter()
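+# Illustrative conversion (statuses assumed): harness code 0 maps to "OK"
+# and a subtest code 0 maps to "PASS", so
+#
+#   testharness_result_converter(test, [test.url, 0, None, None,
+#                                       [["t1", 0, None, None]]])
+#
+# returns (test.result_cls("OK", None),
+#          [test.subtest_result_cls("t1", "PASS", None, None)]).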
+
+
+def reftest_result_converter(self, test, result):
+    return (test.result_cls(result["status"], result["message"],
+                            extra=result.get("extra")), [])
+
+
+def pytest_result_converter(self, test, data):
+    harness_data, subtest_data = data
+
+    if subtest_data is None:
+        subtest_data = []
+
+    harness_result = test.result_cls(*harness_data)
+    subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
+
+    return (harness_result, subtest_results)
+
+
+class ExecutorException(Exception):
+    def __init__(self, status, message):
+        self.status = status
+        self.message = message
+
+
+class TestExecutor(object):
+    __metaclass__ = ABCMeta
+
+    test_type = None
+    convert_result = None
+
+    def __init__(self, browser, server_config, timeout_multiplier=1,
+                 debug_info=None, **kwargs):
+        """Abstract Base class for object that actually executes the tests in a
+        specific browser. Typically there will be a different TestExecutor
+        subclass for each test type and method of executing tests.
+
+        :param browser: ExecutorBrowser instance providing properties of the
+                        browser that will be tested.
+        :param server_config: Dictionary of wptserve server configuration of the
+                              form stored in TestEnvironment.external_config
+        :param timeout_multiplier: Multiplier relative to base timeout to use
+                                   when setting test timeout.
+        """
+        self.runner = None
+        self.browser = browser
+        self.server_config = server_config
+        self.timeout_multiplier = timeout_multiplier
+        self.debug_info = debug_info
+        self.last_environment = {"protocol": "http",
+                                 "prefs": {}}
+        self.protocol = None # This must be set in subclasses
+
+    @property
+    def logger(self):
+        """StructuredLogger for this executor"""
+        if self.runner is not None:
+            return self.runner.logger
+
+    def setup(self, runner):
+        """Run steps needed before tests can be started e.g. connecting to
+        browser instance
+
+        :param runner: TestRunner instance that is going to run the tests"""
+        self.runner = runner
+        if self.protocol is not None:
+            self.protocol.setup(runner)
+
+    def teardown(self):
+        """Run cleanup steps after tests have finished"""
+        if self.protocol is not None:
+            self.protocol.teardown()
+
+    def run_test(self, test):
+        """Run a particular test.
+
+        :param test: The test to run"""
+        if test.environment != self.last_environment:
+            self.on_environment_change(test.environment)
+
+        try:
+            result = self.do_test(test)
+        except Exception as e:
+            result = self.result_from_exception(test, e)
+
+        if result is Stop:
+            return result
+
+        # log result of parent test
+        if result[0].status == "ERROR":
+            self.logger.debug(result[0].message)
+
+        self.last_environment = test.environment
+
+        self.runner.send_message("test_ended", test, result)
+
+    def server_url(self, protocol):
+        return "%s://%s:%s" % (protocol,
+                               self.server_config["host"],
+                               self.server_config["ports"][protocol][0])
+
+    def test_url(self, test):
+        return urlparse.urljoin(self.server_url(test.environment["protocol"]), test.url)
+
+    @abstractmethod
+    def do_test(self, test):
+        """Test-type and protocol specific implementation of running a
+        specific test.
+
+        :param test: The test to run."""
+        pass
+
+    def on_environment_change(self, new_environment):
+        pass
+
+    def result_from_exception(self, test, e):
+        if hasattr(e, "status") and e.status in test.result_cls.statuses:
+            status = e.status
+        else:
+            status = "ERROR"
+        message = unicode(getattr(e, "message", ""))
+        if message:
+            message += "\n"
+        message += traceback.format_exc(e)
+        return test.result_cls(status, message), []
+
+
+class TestharnessExecutor(TestExecutor):
+    convert_result = testharness_result_converter
+
+
+class RefTestExecutor(TestExecutor):
+    convert_result = reftest_result_converter
+
+    def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
+                 debug_info=None, **kwargs):
+        TestExecutor.__init__(self, browser, server_config,
+                              timeout_multiplier=timeout_multiplier,
+                              debug_info=debug_info)
+
+        self.screenshot_cache = screenshot_cache
+
+
+class RefTestImplementation(object):
+    def __init__(self, executor):
+        self.timeout_multiplier = executor.timeout_multiplier
+        self.executor = executor
+        # Cache of url:(screenshot hash, screenshot). Typically the
+        # screenshot is None, but we set this value if a test fails
+        # and the screenshot was taken from the cache so that we may
+        # retrieve the screenshot from the cache directly in the future
+        self.screenshot_cache = self.executor.screenshot_cache
+        self.message = None
+
+    def setup(self):
+        pass
+
+    def teardown(self):
+        pass
+
+    @property
+    def logger(self):
+        return self.executor.logger
+
+    def get_hash(self, test, viewport_size, dpi):
+        timeout = test.timeout * self.timeout_multiplier
+        key = (test.url, viewport_size, dpi)
+
+        if key not in self.screenshot_cache:
+            success, data = self.executor.screenshot(test, viewport_size, dpi)
+
+            if not success:
+                return False, data
+
+            screenshot = data
+            hash_value = hashlib.sha1(screenshot).hexdigest()
+
+            self.screenshot_cache[key] = (hash_value, None)
+
+            rv = (hash_value, screenshot)
+        else:
+            rv = self.screenshot_cache[key]
+
+        self.message.append("%s %s" % (test.url, rv[0]))
+        return True, rv
+
+    def is_pass(self, lhs_hash, rhs_hash, relation):
+        assert relation in ("==", "!=")
+        self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
+        return ((relation == "==" and lhs_hash == rhs_hash) or
+                (relation == "!=" and lhs_hash != rhs_hash))
+
+    def run_test(self, test):
+        viewport_size = test.viewport_size
+        dpi = test.dpi
+        self.message = []
+
+        # Depth-first search of reference tree, with the goal
+        # of reaching a leaf node with only pass results
+
+        stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
+        while stack:
+            hashes = [None, None]
+            screenshots = [None, None]
+
+            nodes, relation = stack.pop()
+
+            for i, node in enumerate(nodes):
+                success, data = self.get_hash(node, viewport_size, dpi)
+                if success is False:
+                    return {"status": data[0], "message": data[1]}
+
+                hashes[i], screenshots[i] = data
+
+            if self.is_pass(hashes[0], hashes[1], relation):
+                if nodes[1].references:
+                    stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
+                else:
+                    # We passed
+                    return {"status":"PASS", "message": None}
+
+        # We failed, so construct a failure message
+
+        for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
+            if screenshot is None:
+                success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
+                if success:
+                    screenshots[i] = screenshot
+
+        log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
+                    {"url": nodes[1].url, "screenshot": screenshots[1]}]
+
+        return {"status": "FAIL",
+                "message": "\n".join(self.message),
+                "extra": {"reftest_screenshots": log_data}}
+
+    def retake_screenshot(self, node, viewport_size, dpi):
+        success, data = self.executor.screenshot(node, viewport_size, dpi)
+        if not success:
+            return False, data
+
+        key = (node.url, viewport_size, dpi)
+        hash_val, _ = self.screenshot_cache[key]
+        self.screenshot_cache[key] = hash_val, data
+        return True, data
+
+
+class WdspecExecutor(TestExecutor):
+    convert_result = pytest_result_converter
+
+
+class Protocol(object):
+    def __init__(self, executor, browser):
+        self.executor = executor
+        self.browser = browser
+
+    @property
+    def logger(self):
+        return self.executor.logger
+
+    def setup(self, runner):
+        pass
+
+    def teardown(self):
+        pass
+
+    def wait(self):
+        pass
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executormarionette.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executormarionette.py
new file mode 100644
index 0000000..1c588d3
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executormarionette.py
@@ -0,0 +1,691 @@
+import hashlib
+import httplib
+import os
+import socket
+import threading
+import time
+import traceback
+import urlparse
+import uuid
+from collections import defaultdict
+
+from ..wpttest import WdspecResult, WdspecSubtestResult
+
+errors = None
+marionette = None
+pytestrunner = None
+
+here = os.path.split(__file__)[0]
+
+from .base import (ExecutorException,
+                   Protocol,
+                   RefTestExecutor,
+                   RefTestImplementation,
+                   TestExecutor,
+                   TestharnessExecutor,
+                   testharness_result_converter,
+                   reftest_result_converter,
+                   strip_server,
+                   WdspecExecutor)
+from ..testrunner import Stop
+from ..webdriver_server import GeckoDriverServer
+
+# Extra timeout to use after internal test timeout at which the harness
+# should force a timeout
+extra_timeout = 5 # seconds
+
+
+def do_delayed_imports():
+    global errors, marionette
+
+    # Marionette client used to be called marionette, recently it changed
+    # to marionette_driver for unfathomable reasons
+    try:
+        import marionette
+        from marionette import errors
+    except ImportError:
+        from marionette_driver import marionette, errors
+
+
+class MarionetteProtocol(Protocol):
+    def __init__(self, executor, browser, timeout_multiplier=1):
+        do_delayed_imports()
+
+        Protocol.__init__(self, executor, browser)
+        self.marionette = None
+        self.marionette_port = browser.marionette_port
+        self.timeout_multiplier = timeout_multiplier
+        self.timeout = None
+        self.runner_handle = None
+
+    def setup(self, runner):
+        """Connect to browser via Marionette."""
+        Protocol.setup(self, runner)
+
+        self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
+        startup_timeout = marionette.Marionette.DEFAULT_STARTUP_TIMEOUT * self.timeout_multiplier
+        self.marionette = marionette.Marionette(host='localhost',
+                                                port=self.marionette_port,
+                                                socket_timeout=None,
+                                                startup_timeout=startup_timeout)
+
+        # XXX Move this timeout somewhere
+        self.logger.debug("Waiting for Marionette connection")
+        while True:
+            success = self.marionette.wait_for_port(60 * self.timeout_multiplier)
+            # When running in a debugger, wait indefinitely for Firefox to start
+            if success or self.executor.debug_info is None:
+                break
+
+        session_started = False
+        if success:
+            try:
+                self.logger.debug("Starting Marionette session")
+                self.marionette.start_session()
+            except Exception as e:
+                self.logger.warning("Starting marionette session failed: %s" % e)
+            else:
+                self.logger.debug("Marionette session started")
+                session_started = True
+
+        if not success or not session_started:
+            self.logger.warning("Failed to connect to Marionette")
+            self.executor.runner.send_message("init_failed")
+        else:
+            try:
+                self.after_connect()
+            except Exception:
+                self.logger.warning("Post-connection steps failed")
+                self.logger.error(traceback.format_exc())
+                self.executor.runner.send_message("init_failed")
+            else:
+                self.executor.runner.send_message("init_succeeded")
+
+    def teardown(self):
+        try:
+            self.marionette._request_in_app_shutdown()
+            self.marionette.delete_session(send_request=False, reset_session_id=True)
+        except Exception:
+            # This is typically because the session never started
+            pass
+        if self.marionette is not None:
+            del self.marionette
+
+    @property
+    def is_alive(self):
+        """Check if the Marionette connection is still active."""
+        try:
+            self.marionette.current_window_handle
+        except Exception:
+            return False
+        return True
+
+    def after_connect(self):
+        self.load_runner(self.executor.last_environment["protocol"])
+
+    def set_timeout(self, timeout):
+        """Set the Marionette script timeout.
+
+        :param timeout: Script timeout in seconds
+
+        """
+        self.marionette.timeout.script = timeout
+        self.timeout = timeout
+
+    def load_runner(self, protocol):
+        # Check if we previously had a test window open, and if we did make sure it's closed
+        self.marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
+        url = urlparse.urljoin(self.executor.server_url(protocol), "/testharness_runner.html")
+        self.logger.debug("Loading %s" % url)
+        self.runner_handle = self.marionette.current_window_handle
+        try:
+            self.marionette.navigate(url)
+        except Exception as e:
+            self.logger.critical(
+                "Loading initial page %s failed. Ensure that the "
+                "there are no other programs bound to this port and "
+                "that your firewall rules or network setup does not "
+                "prevent access.\e%s" % (url, traceback.format_exc(e)))
+        self.marionette.execute_script(
+            "document.title = '%s'" % threading.current_thread().name.replace("'", '"'))
+
+    def close_old_windows(self, protocol):
+        handles = self.marionette.window_handles
+        runner_handle = None
+        try:
+            handles.remove(self.runner_handle)
+            runner_handle = self.runner_handle
+        except ValueError:
+            # The runner window probably changed id but we can restore it
+            # This isn't supposed to happen, but marionette ids are not yet stable
+            # We assume that the first handle returned corresponds to the runner,
+            # but it hopefully doesn't matter too much if that assumption is
+            # wrong since we reload the runner in that tab anyway.
+            runner_handle = handles.pop(0)
+
+        for handle in handles:
+            self.marionette.switch_to_window(handle)
+            self.marionette.close()
+
+        self.marionette.switch_to_window(runner_handle)
+        if runner_handle != self.runner_handle:
+            self.load_runner(protocol)
+
+    def wait(self):
+        socket_timeout = self.marionette.client.sock.gettimeout()
+        if socket_timeout:
+            self.marionette.timeout.script = socket_timeout / 2
+
+        self.marionette.switch_to_window(self.runner_handle)
+        while True:
+            try:
+                self.marionette.execute_async_script("")
+            except errors.ScriptTimeoutException:
+                self.logger.debug("Script timed out")
+            except (socket.timeout, IOError):
+                self.logger.debug("Socket closed")
+                break
+            except Exception as e:
+                self.logger.error(traceback.format_exc(e))
+                break
+
+    def on_environment_change(self, old_environment, new_environment):
+        # Unset all the old prefs
+        for name in old_environment.get("prefs", {}).iterkeys():
+            value = self.executor.original_pref_values[name]
+            if value is None:
+                self.clear_user_pref(name)
+            else:
+                self.set_pref(name, value)
+
+        for name, value in new_environment.get("prefs", {}).iteritems():
+            self.executor.original_pref_values[name] = self.get_pref(name)
+            self.set_pref(name, value)
+
+    def set_pref(self, name, value):
+        if value.lower() not in ("true", "false"):
+            try:
+                int(value)
+            except ValueError:
+                value = "'%s'" % value
+        else:
+            value = value.lower()
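+        # e.g. (values assumed): "True" -> "true", "42" stays numeric, and
+        # "foo" becomes "'foo'" so it embeds as a string in the script below.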
+
+        self.logger.info("Setting pref %s (%s)" % (name, value))
+
+        script = """
+            let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
+                                          .getService(Components.interfaces.nsIPrefBranch);
+            let pref = '%s';
+            let type = prefInterface.getPrefType(pref);
+            let value = %s;
+            switch(type) {
+                case prefInterface.PREF_STRING:
+                    prefInterface.setCharPref(pref, value);
+                    break;
+                case prefInterface.PREF_BOOL:
+                    prefInterface.setBoolPref(pref, value);
+                    break;
+                case prefInterface.PREF_INT:
+                    prefInterface.setIntPref(pref, value);
+                    break;
+            }
+            """ % (name, value)
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            self.marionette.execute_script(script)
+
+    def clear_user_pref(self, name):
+        self.logger.info("Clearing pref %s" % (name))
+        script = """
+            let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
+                                          .getService(Components.interfaces.nsIPrefBranch);
+            let pref = '%s';
+            prefInterface.clearUserPref(pref);
+            """ % name
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            self.marionette.execute_script(script)
+
+    def get_pref(self, name):
+        script = """
+            let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
+                                          .getService(Components.interfaces.nsIPrefBranch);
+            let pref = '%s';
+            let type = prefInterface.getPrefType(pref);
+            switch(type) {
+                case prefInterface.PREF_STRING:
+                    return prefInterface.getCharPref(pref);
+                case prefInterface.PREF_BOOL:
+                    return prefInterface.getBoolPref(pref);
+                case prefInterface.PREF_INT:
+                    return prefInterface.getIntPref(pref);
+                case prefInterface.PREF_INVALID:
+                    return null;
+            }
+            """ % name
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            return self.marionette.execute_script(script)
+
+    def clear_origin(self, url):
+        self.logger.info("Clearing origin %s" % (url))
+        script = """
+            let url = '%s';
+            let uri = Components.classes["@mozilla.org/network/io-service;1"]
+                                .getService(Ci.nsIIOService)
+                                .newURI(url);
+            let ssm = Components.classes["@mozilla.org/scriptsecuritymanager;1"]
+                                .getService(Ci.nsIScriptSecurityManager);
+            let principal = ssm.createCodebasePrincipal(uri, {});
+            let qms = Components.classes["@mozilla.org/dom/quota-manager-service;1"]
+                                .getService(Components.interfaces.nsIQuotaManagerService);
+            qms.clearStoragesForPrincipal(principal, "default", true);
+            """ % url
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            self.marionette.execute_script(script)
+
+
+class RemoteMarionetteProtocol(Protocol):
+    def __init__(self, executor, browser):
+        do_delayed_imports()
+        Protocol.__init__(self, executor, browser)
+        self.webdriver_binary = executor.webdriver_binary
+        self.webdriver_args = executor.webdriver_args
+        self.capabilities = self.executor.capabilities
+        self.session_config = None
+        self.server = None
+
+    def setup(self, runner):
+        """Connect to browser via the Marionette HTTP server."""
+        try:
+            self.server = GeckoDriverServer(
+                self.logger,
+                binary=self.webdriver_binary,
+                args=self.webdriver_args)
+            self.server.start(block=False)
+            self.logger.info(
+                "WebDriver HTTP server listening at %s" % self.server.url)
+            self.session_config = {"host": self.server.host,
+                                   "port": self.server.port,
+                                   "capabilities": self.capabilities}
+        except Exception:
+            self.logger.error(traceback.format_exc())
+            self.executor.runner.send_message("init_failed")
+        else:
+            self.executor.runner.send_message("init_succeeded")
+
+    def teardown(self):
+        if self.server is not None and self.server.is_alive:
+            self.server.stop()
+
+    @property
+    def is_alive(self):
+        """Test that the Marionette connection is still alive.
+
+        Because the remote communication happens over HTTP we need to
+        make an explicit request to the remote.  It is allowed for
+        WebDriver spec tests to not have a WebDriver session, since this
+        may be what is tested.
+
+        An HTTP request to an invalid path that results in a 404 is
+        proof enough to us that the server is alive and kicking.
+        """
+        conn = httplib.HTTPConnection(self.server.host, self.server.port)
+        conn.request("HEAD", self.server.base_path + "invalid")
+        res = conn.getresponse()
+        return res.status == 404
+
+
+class ExecuteAsyncScriptRun(object):
+    def __init__(self, logger, func, protocol, url, timeout):
+        self.logger = logger
+        self.result = (None, None)
+        self.protocol = protocol
+        self.marionette = protocol.marionette
+        self.func = func
+        self.url = url
+        self.timeout = timeout
+        self.result_flag = threading.Event()
+
+    def run(self):
+        index = self.url.rfind("/storage/")
+        if index != -1:
+            # Clear storage
+            self.protocol.clear_origin(self.url)
+
+        timeout = self.timeout
+
+        try:
+            if timeout is not None:
+                if timeout + extra_timeout != self.protocol.timeout:
+                    self.protocol.set_timeout(timeout + extra_timeout)
+            else:
+                # We just want it to never time out, really, but marionette doesn't
+                # make that possible. It also seems to time out immediately if the
+                # timeout is set too high. This works at least.
+                self.protocol.set_timeout(2**28 - 1)
+        except IOError:
+            self.logger.error("Lost marionette connection before starting test")
+            return Stop
+
+        executor = threading.Thread(target=self._run)
+        executor.start()
+
+        if timeout is not None:
+            wait_timeout = timeout + 2 * extra_timeout
+        else:
+            wait_timeout = None
+
+        flag = self.result_flag.wait(wait_timeout)
+
+        if self.result == (None, None):
+            self.logger.debug("Timed out waiting for a result")
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
+        elif self.result[1] is None:
+            # We didn't get any data back from the test, so check if the
+            # browser is still responsive
+            if self.protocol.is_alive:
+                self.result = False, ("ERROR", None)
+            else:
+                self.result = False, ("CRASH", None)
+        return self.result
+
+    def _run(self):
+        try:
+            self.result = True, self.func(self.marionette, self.url, self.timeout)
+        except errors.ScriptTimeoutException:
+            self.logger.debug("Got a marionette timeout")
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
+        except (socket.timeout, IOError):
+            # This can happen on a crash
+            # Also, should check after the test if the firefox process is still running
+            # and otherwise ignore any other result and set it to crash
+            self.result = False, ("CRASH", None)
+        except Exception as e:
+            message = getattr(e, "message", "")
+            if message:
+                message += "\n"
+            message += traceback.format_exc(e)
+            self.result = False, ("ERROR", e)
+
+        finally:
+            self.result_flag.set()
+
+
+class MarionetteTestharnessExecutor(TestharnessExecutor):
+    def __init__(self, browser, server_config, timeout_multiplier=1,
+                 close_after_done=True, debug_info=None, **kwargs):
+        """Marionette-based executor for testharness.js tests"""
+        TestharnessExecutor.__init__(self, browser, server_config,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_info=debug_info)
+
+        self.protocol = MarionetteProtocol(self, browser, timeout_multiplier)
+        with open(os.path.join(here, "testharness_marionette.js")) as f:
+            self.script = f.read()
+        self.close_after_done = close_after_done
+        self.window_id = str(uuid.uuid4())
+
+        self.original_pref_values = {}
+
+        if marionette is None:
+            do_delayed_imports()
+
+    def is_alive(self):
+        return self.protocol.is_alive
+
+    def on_environment_change(self, new_environment):
+        self.protocol.on_environment_change(self.last_environment, new_environment)
+
+        if new_environment["protocol"] != self.last_environment["protocol"]:
+            self.protocol.load_runner(new_environment["protocol"])
+
+    def do_test(self, test):
+        timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
+                   else None)
+
+        success, data = ExecuteAsyncScriptRun(self.logger,
+                                              self.do_testharness,
+                                              self.protocol,
+                                              self.test_url(test),
+                                              timeout).run()
+        if success:
+            return self.convert_result(test, data)
+
+        return (test.result_cls(*data), [])
+
+    def do_testharness(self, marionette, url, timeout):
+        if self.close_after_done:
+            marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
+            self.protocol.close_old_windows(self.last_environment["protocol"])
+
+        if timeout is not None:
+            timeout_ms = str(timeout * 1000)
+        else:
+            timeout_ms = "null"
+
+        script = self.script % {"abs_url": url,
+                                "url": strip_server(url),
+                                "window_id": self.window_id,
+                                "timeout_multiplier": self.timeout_multiplier,
+                                "timeout": timeout_ms,
+                                "explicit_timeout": timeout is None}
+
+        rv = marionette.execute_async_script(script, new_sandbox=False)
+        return rv
+
+
+class MarionetteRefTestExecutor(RefTestExecutor):
+    def __init__(self, browser, server_config, timeout_multiplier=1,
+                 screenshot_cache=None, close_after_done=True,
+                 debug_info=None, reftest_internal=False,
+                 reftest_screenshot="unexpected",
+                 group_metadata=None, **kwargs):
+        """Marionette-based executor for reftests"""
+        RefTestExecutor.__init__(self,
+                                 browser,
+                                 server_config,
+                                 screenshot_cache=screenshot_cache,
+                                 timeout_multiplier=timeout_multiplier,
+                                 debug_info=debug_info)
+        self.protocol = MarionetteProtocol(self, browser)
+        self.implementation = (InternalRefTestImplementation
+                               if reftest_internal
+                               else RefTestImplementation)(self)
+        self.implementation_kwargs = ({"screenshot": reftest_screenshot} if
+                                      reftest_internal else {})
+
+        self.close_after_done = close_after_done
+        self.has_window = False
+        self.original_pref_values = {}
+        self.group_metadata = group_metadata
+
+        with open(os.path.join(here, "reftest.js")) as f:
+            self.script = f.read()
+        with open(os.path.join(here, "reftest-wait_marionette.js")) as f:
+            self.wait_script = f.read()
+
+    def setup(self, runner):
+        super(MarionetteRefTestExecutor, self).setup(runner)
+        self.implementation.setup(**self.implementation_kwargs)
+
+    def teardown(self):
+        self.implementation.teardown()
+        handle = self.protocol.marionette.window_handles[0]
+        self.protocol.marionette.switch_to_window(handle)
+        super(MarionetteRefTestExecutor, self).teardown()
+
+    def is_alive(self):
+        return self.protocol.is_alive
+
+    def on_environment_change(self, new_environment):
+        self.protocol.on_environment_change(self.last_environment, new_environment)
+
+    def do_test(self, test):
+        if not isinstance(self.implementation, InternalRefTestImplementation):
+            if self.close_after_done and self.has_window:
+                self.protocol.marionette.close()
+                self.protocol.marionette.switch_to_window(
+                    self.protocol.marionette.window_handles[-1])
+                self.has_window = False
+
+            if not self.has_window:
+                self.protocol.marionette.execute_script(self.script)
+                self.protocol.marionette.switch_to_window(self.protocol.marionette.window_handles[-1])
+                self.has_window = True
+
+        result = self.implementation.run_test(test)
+        return self.convert_result(test, result)
+
+    def screenshot(self, test, viewport_size, dpi):
+        # https://github.com/w3c/wptrunner/issues/166
+        assert viewport_size is None
+        assert dpi is None
+
+        timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
+
+        test_url = self.test_url(test)
+
+        return ExecuteAsyncScriptRun(self.logger,
+                                     self._screenshot,
+                                     self.protocol,
+                                     test_url,
+                                     timeout).run()
+
+    def _screenshot(self, marionette, url, timeout):
+        marionette.navigate(url)
+
+        marionette.execute_async_script(self.wait_script)
+
+        screenshot = marionette.screenshot(full=False)
+        # Strip off the "data:image/png;base64," prefix of the data URL
+        if screenshot.startswith("data:image/png;base64,"):
+            screenshot = screenshot.split(",", 1)[1]
+
+        return screenshot
+
+
+class InternalRefTestImplementation(object):
+    def __init__(self, executor):
+        self.timeout_multiplier = executor.timeout_multiplier
+        self.executor = executor
+
+    @property
+    def logger(self):
+        return self.executor.logger
+
+    def setup(self, screenshot="unexpected"):
+        data = {"screenshot": screenshot}
+        if self.executor.group_metadata is not None:
+            data["urlCount"] = {urlparse.urljoin(self.executor.server_url(key[0]), key[1]):value
+                                for key, value in self.executor.group_metadata.get("url_count", {}).iteritems()
+                                if value > 1}
+        self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CHROME)
+        self.executor.protocol.marionette._send_message("reftest:setup", data)
+
+    def run_test(self, test):
+        viewport_size = test.viewport_size
+        dpi = test.dpi
+
+        references = self.get_references(test)
+        rv = self.executor.protocol.marionette._send_message("reftest:run",
+                                                             {"test": self.executor.test_url(test),
+                                                              "references": references,
+                                                              "expected": test.expected(),
+                                                              "timeout": test.timeout * 1000})["value"]
+        return rv
+
+    def get_references(self, node):
+        rv = []
+        for item, relation in node.references:
+            rv.append([self.executor.test_url(item), self.get_references(item), relation])
+        return rv
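+    # The result mirrors the reference tree as nested lists, e.g.
+    # (urls assumed):
+    #   [["http://web-platform.test:8000/green-ref.html", [], "=="]]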
+
+    def teardown(self):
+        try:
+            self.executor.protocol.marionette._send_message("reftest:teardown", {})
+            self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CONTENT)
+        except socket.error:
+            pass
+
+
+class WdspecRun(object):
+    def __init__(self, func, session, path, timeout):
+        self.func = func
+        self.result = (None, None)
+        self.session = session
+        self.path = path
+        self.timeout = timeout
+        self.result_flag = threading.Event()
+
+    def run(self):
+        """Runs function in a thread and interrupts it if it exceeds the
+        given timeout.  Returns (True, (Result, [SubtestResult ...])) in
+        case of success, or (False, (status, extra information)) in the
+        event of failure.
+        """
+
+        executor = threading.Thread(target=self._run)
+        executor.start()
+
+        flag = self.result_flag.wait(self.timeout)
+        if self.result[1] is None:
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
+
+        return self.result
+
+    def _run(self):
+        try:
+            self.result = True, self.func(self.session, self.path, self.timeout)
+        except (socket.timeout, IOError):
+            self.result = False, ("CRASH", None)
+        except Exception as e:
+            message = getattr(e, "message", "")
+            if message:
+                message += "\n"
+            message += traceback.format_exc(e)
+            self.result = False, ("ERROR", message)
+        finally:
+            self.result_flag.set()
+
+
+class MarionetteWdspecExecutor(WdspecExecutor):
+    def __init__(self, browser, server_config, webdriver_binary,
+                 timeout_multiplier=1, close_after_done=True, debug_info=None,
+                 capabilities=None, webdriver_args=None, binary=None, **kwargs):
+        self.do_delayed_imports()
+        WdspecExecutor.__init__(self, browser, server_config,
+                                timeout_multiplier=timeout_multiplier,
+                                debug_info=debug_info)
+        self.webdriver_binary = webdriver_binary
+        self.webdriver_args = (webdriver_args or []) + ["--binary", binary]
+        self.capabilities = capabilities
+        self.protocol = RemoteMarionetteProtocol(self, browser)
+
+    def is_alive(self):
+        return self.protocol.is_alive
+
+    def on_environment_change(self, new_environment):
+        pass
+
+    def do_test(self, test):
+        timeout = test.timeout * self.timeout_multiplier + extra_timeout
+
+        success, data = WdspecRun(self.do_wdspec,
+                                  self.protocol.session_config,
+                                  test.abs_path,
+                                  timeout).run()
+
+        if success:
+            return self.convert_result(test, data)
+
+        return (test.result_cls(*data), [])
+
+    def do_wdspec(self, session_config, path, timeout):
+        harness_result = ("OK", None)
+        subtest_results = pytestrunner.run(path,
+                                           self.server_config,
+                                           session_config,
+                                           timeout=timeout)
+        return (harness_result, subtest_results)
+
+    def do_delayed_imports(self):
+        global pytestrunner
+        from . import pytestrunner
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorselenium.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorselenium.py
new file mode 100644
index 0000000..ef898f9
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorselenium.py
@@ -0,0 +1,261 @@
+import os
+import socket
+import sys
+import threading
+import time
+import traceback
+import urlparse
+import uuid
+
+from .base import (ExecutorException,
+                   Protocol,
+                   RefTestExecutor,
+                   RefTestImplementation,
+                   TestExecutor,
+                   TestharnessExecutor,
+                   testharness_result_converter,
+                   reftest_result_converter,
+                   strip_server)
+from ..testrunner import Stop
+
+here = os.path.split(__file__)[0]
+
+webdriver = None
+exceptions = None
+RemoteConnection = None
+
+extra_timeout = 5
+
+def do_delayed_imports():
+    global webdriver
+    global exceptions
+    global RemoteConnection
+    from selenium import webdriver
+    from selenium.common import exceptions
+    from selenium.webdriver.remote.remote_connection import RemoteConnection
+
+class SeleniumProtocol(Protocol):
+    def __init__(self, executor, browser, capabilities, **kwargs):
+        do_delayed_imports()
+
+        Protocol.__init__(self, executor, browser)
+        self.capabilities = capabilities
+        self.url = browser.webdriver_url
+        self.webdriver = None
+
+    def setup(self, runner):
+        """Connect to browser via Selenium's WebDriver implementation."""
+        self.runner = runner
+        self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
+
+        session_started = False
+        try:
+            self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
+                                                                                resolve_ip=False),
+                                              desired_capabilities=self.capabilities)
+        except:
+            self.logger.warning(
+                "Connecting to Selenium failed:\n%s" % traceback.format_exc())
+        else:
+            self.logger.debug("Selenium session started")
+            session_started = True
+
+        if not session_started:
+            self.logger.warning("Failed to connect to Selenium")
+            self.executor.runner.send_message("init_failed")
+        else:
+            try:
+                self.after_connect()
+            except:
+                print >> sys.stderr, traceback.format_exc()
+                self.logger.warning(
+                    "Failed to connect to navigate initial page")
+                self.executor.runner.send_message("init_failed")
+            else:
+                self.executor.runner.send_message("init_succeeded")
+
+    def teardown(self):
+        self.logger.debug("Hanging up on Selenium session")
+        try:
+            self.webdriver.quit()
+        except:
+            pass
+        del self.webdriver
+
+    def is_alive(self):
+        try:
+            # Get a simple property over the connection
+            self.webdriver.current_window_handle
+        # TODO what exception?
+        except (socket.timeout, exceptions.ErrorInResponseException):
+            return False
+        return True
+
+    def after_connect(self):
+        self.load_runner("http")
+
+    def load_runner(self, protocol):
+        url = urlparse.urljoin(self.executor.server_url(protocol),
+                               "/testharness_runner.html")
+        self.logger.debug("Loading %s" % url)
+        self.webdriver.get(url)
+        self.webdriver.execute_script("document.title = '%s'" %
+                                      threading.current_thread().name.replace("'", '"'))
+
+    def wait(self):
+        while True:
+            try:
+                self.webdriver.execute_async_script("")
+            except exceptions.TimeoutException:
+                pass
+            except (socket.timeout, exceptions.NoSuchWindowException,
+                    exceptions.ErrorInResponseException, IOError):
+                break
+            except Exception as e:
+                self.logger.error(traceback.format_exc(e))
+                break
+
+
+class SeleniumRun(object):
+    def __init__(self, func, webdriver, url, timeout):
+        self.func = func
+        self.result = None
+        self.webdriver = webdriver
+        self.url = url
+        self.timeout = timeout
+        self.result_flag = threading.Event()
+
+    def run(self):
+        timeout = self.timeout
+
+        try:
+            self.webdriver.set_script_timeout((timeout + extra_timeout) * 1000)
+        except exceptions.ErrorInResponseException:
+            # SeleniumRun never receives a logger, so report to stderr
+            print >> sys.stderr, "Lost WebDriver connection"
+            return Stop
+
+        executor = threading.Thread(target=self._run)
+        executor.start()
+
+        flag = self.result_flag.wait(timeout + 2 * extra_timeout)
+        if self.result is None:
+            assert not flag
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
+
+        return self.result
+
+    def _run(self):
+        try:
+            self.result = True, self.func(self.webdriver, self.url, self.timeout)
+        except exceptions.TimeoutException:
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
+        except (socket.timeout, exceptions.ErrorInResponseException):
+            self.result = False, ("CRASH", None)
+        except Exception as e:
+            message = getattr(e, "message", "")
+            if message:
+                message += "\n"
+            message += traceback.format_exc()
+            self.result = False, ("ERROR", message)
+        finally:
+            self.result_flag.set()
+
+
+class SeleniumTestharnessExecutor(TestharnessExecutor):
+    def __init__(self, browser, server_config, timeout_multiplier=1,
+                 close_after_done=True, capabilities=None, debug_info=None,
+                 **kwargs):
+        """Selenium-based executor for testharness.js tests"""
+        TestharnessExecutor.__init__(self, browser, server_config,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_info=debug_info)
+        self.protocol = SeleniumProtocol(self, browser, capabilities)
+        with open(os.path.join(here, "testharness_webdriver.js")) as f:
+            self.script = f.read()
+        self.close_after_done = close_after_done
+        self.window_id = str(uuid.uuid4())
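+        # The id is used as the window.open() target name in
+        # testharness_webdriver.js, so repeated opens from the same
+        # executor target the same test window.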
+
+    def is_alive(self):
+        return self.protocol.is_alive()
+
+    def on_environment_change(self, new_environment):
+        if new_environment["protocol"] != self.last_environment["protocol"]:
+            self.protocol.load_runner(new_environment["protocol"])
+
+    def do_test(self, test):
+        url = self.test_url(test)
+
+        success, data = SeleniumRun(self.logger,
+                                    self.do_testharness,
+                                    self.protocol.webdriver,
+                                    url,
+                                    test.timeout * self.timeout_multiplier).run()
+
+        if success:
+            return self.convert_result(test, data)
+
+        return (test.result_cls(*data), [])
+
+    def do_testharness(self, webdriver, url, timeout):
+        return webdriver.execute_async_script(
+            self.script % {"abs_url": url,
+                           "url": strip_server(url),
+                           "window_id": self.window_id,
+                           "timeout_multiplier": self.timeout_multiplier,
+                           "timeout": timeout * 1000})
+
+
+class SeleniumRefTestExecutor(RefTestExecutor):
+    def __init__(self, browser, server_config, timeout_multiplier=1,
+                 screenshot_cache=None, close_after_done=True,
+                 debug_info=None, capabilities=None, **kwargs):
+        """Selenium WebDriver-based executor for reftests"""
+        RefTestExecutor.__init__(self,
+                                 browser,
+                                 server_config,
+                                 screenshot_cache=screenshot_cache,
+                                 timeout_multiplier=timeout_multiplier,
+                                 debug_info=debug_info)
+        self.protocol = SeleniumProtocol(self, browser,
+                                         capabilities=capabilities)
+        self.implementation = RefTestImplementation(self)
+        self.close_after_done = close_after_done
+        self.has_window = False
+
+        with open(os.path.join(here, "reftest.js")) as f:
+            self.script = f.read()
+        with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
+            self.wait_script = f.read()
+
+    def is_alive(self):
+        return self.protocol.is_alive()
+
+    def do_test(self, test):
+        self.logger.info("Test requires OS-level window focus")
+
+        self.protocol.webdriver.set_window_size(600, 600)
+
+        result = self.implementation.run_test(test)
+
+        return self.convert_result(test, result)
+
+    def screenshot(self, test, viewport_size, dpi):
+        # https://github.com/w3c/wptrunner/issues/166
+        assert viewport_size is None
+        assert dpi is None
+
+        return SeleniumRun(self.logger,
+                           self._screenshot,
+                           self.protocol.webdriver,
+                           self.test_url(test),
+                           test.timeout).run()
+
+    def _screenshot(self, webdriver, url, timeout):
+        webdriver.get(url)
+
+        webdriver.execute_async_script(self.wait_script)
+
+        screenshot = webdriver.get_screenshot_as_base64()
+
+        # strip off the "data:image/png;base64," prefix, if present
+        if screenshot.startswith("data:image/png;base64,"):
+            screenshot = screenshot.split(",", 1)[1]
+
+        return screenshot
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorservo.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorservo.py
new file mode 100644
index 0000000..216d687
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorservo.py
@@ -0,0 +1,368 @@
+import base64
+import hashlib
+import httplib
+import json
+import os
+import subprocess
+import tempfile
+import threading
+import traceback
+import urlparse
+import uuid
+from collections import defaultdict
+
+from mozprocess import ProcessHandler
+
+from .base import (ExecutorException,
+                   Protocol,
+                   RefTestImplementation,
+                   testharness_result_converter,
+                   reftest_result_converter,
+                   WdspecExecutor)
+from .process import ProcessTestExecutor
+from ..browsers.base import browser_command
+from ..wpttest import WdspecResult, WdspecSubtestResult
+from ..webdriver_server import ServoDriverServer
+from .executormarionette import WdspecRun
+
+pytestrunner = None
+webdriver = None
+
+extra_timeout = 5  # seconds
+
+
+def do_delayed_imports():
+    # Imported late, mirroring ServoWdspecProtocol.do_delayed_imports below;
+    # these modules are only needed once an executor is instantiated.
+    global pytestrunner, webdriver
+    from . import pytestrunner
+    import webdriver
+
+hosts_text = """127.0.0.1 web-platform.test
+127.0.0.1 www.web-platform.test
+127.0.0.1 www1.web-platform.test
+127.0.0.1 www2.web-platform.test
+127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
+127.0.0.1 xn--lve-6lad.web-platform.test
+"""
+
+def make_hosts_file():
+    hosts_fd, hosts_path = tempfile.mkstemp()
+    with os.fdopen(hosts_fd, "w") as f:
+        f.write(hosts_text)
+    return hosts_path
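+# The executors below hand this file to Servo through the HOST_FILE
+# environment variable and unlink it again in teardown().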
+
+
+class ServoTestharnessExecutor(ProcessTestExecutor):
+    convert_result = testharness_result_converter
+
+    def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
+                 pause_after_test=False, **kwargs):
+        ProcessTestExecutor.__init__(self, browser, server_config,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_info=debug_info)
+        self.pause_after_test = pause_after_test
+        self.result_data = None
+        self.result_flag = None
+        self.protocol = Protocol(self, browser)
+        self.hosts_path = make_hosts_file()
+
+    def teardown(self):
+        try:
+            os.unlink(self.hosts_path)
+        except OSError:
+            pass
+        ProcessTestExecutor.teardown(self)
+
+    def do_test(self, test):
+        self.result_data = None
+        self.result_flag = threading.Event()
+
+        args = [
+            "--hard-fail", "-u", "Servo/wptrunner",
+            "-Z", "replace-surrogates", "-z", self.test_url(test),
+        ]
+        for stylesheet in self.browser.user_stylesheets:
+            args += ["--user-stylesheet", stylesheet]
+        for pref, value in test.environment.get('prefs', {}).iteritems():
+            args += ["--pref", "%s=%s" % (pref, value)]
+        if self.browser.ca_certificate_path:
+            args += ["--certificate-path", self.browser.ca_certificate_path]
+        args += self.browser.binary_args
+        debug_args, command = browser_command(self.binary, args, self.debug_info)
+
+        self.command = command
+
+        if self.pause_after_test:
+            self.command.remove("-z")
+
+        self.command = debug_args + self.command
+
+        env = os.environ.copy()
+        env["HOST_FILE"] = self.hosts_path
+        env["RUST_BACKTRACE"] = "1"
+
+        if not self.interactive:
+            self.proc = ProcessHandler(self.command,
+                                       processOutputLine=[self.on_output],
+                                       onFinish=self.on_finish,
+                                       env=env,
+                                       storeOutput=False)
+            self.proc.run()
+        else:
+            self.proc = subprocess.Popen(self.command, env=env)
+
+        try:
+            timeout = test.timeout * self.timeout_multiplier
+
+            # Now wait to get the output we expect, or until we reach the timeout
+            if not self.interactive and not self.pause_after_test:
+                wait_timeout = timeout + 5
+                self.result_flag.wait(wait_timeout)
+            else:
+                wait_timeout = None
+                self.proc.wait()
+
+            proc_is_running = True
+
+            if self.result_flag.is_set():
+                if self.result_data is not None:
+                    result = self.convert_result(test, self.result_data)
+                else:
+                    self.proc.wait()
+                    result = (test.result_cls("CRASH", None), [])
+                    proc_is_running = False
+            else:
+                result = (test.result_cls("TIMEOUT", None), [])
+
+            if proc_is_running:
+                if self.pause_after_test:
+                    self.logger.info("Pausing until the browser exits")
+                    self.proc.wait()
+                else:
+                    self.proc.kill()
+        except KeyboardInterrupt:
+            self.proc.kill()
+            raise
+
+        return result
+
+    def on_output(self, line):
+        prefix = "ALERT: RESULT: "
+        line = line.decode("utf8", "replace")
+        if line.startswith(prefix):
+            self.result_data = json.loads(line[len(prefix):])
+            self.result_flag.set()
+        else:
+            if self.interactive:
+                print line
+            else:
+                self.logger.process_output(self.proc.pid,
+                                           line,
+                                           " ".join(self.command))
+
+    def on_finish(self):
+        self.result_flag.set()
+
+
+class TempFilename(object):
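+    """Context manager yielding a fresh file path inside ``directory``.
+
+    The file, if the body created one, is unlinked again on exit;
+    ServoRefTestExecutor below uses it to collect --output screenshots.
+    """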
+    def __init__(self, directory):
+        self.directory = directory
+        self.path = None
+
+    def __enter__(self):
+        self.path = os.path.join(self.directory, str(uuid.uuid4()))
+        return self.path
+
+    def __exit__(self, *args, **kwargs):
+        try:
+            os.unlink(self.path)
+        except OSError:
+            pass
+
+
+class ServoRefTestExecutor(ProcessTestExecutor):
+    convert_result = reftest_result_converter
+
+    def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
+                 screenshot_cache=None, debug_info=None, pause_after_test=False,
+                 **kwargs):
+        do_delayed_imports()
+        ProcessTestExecutor.__init__(self,
+                                     browser,
+                                     server_config,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_info=debug_info)
+
+        self.protocol = Protocol(self, browser)
+        self.screenshot_cache = screenshot_cache
+        self.implementation = RefTestImplementation(self)
+        self.tempdir = tempfile.mkdtemp()
+        self.hosts_path = make_hosts_file()
+
+    def teardown(self):
+        try:
+            os.unlink(self.hosts_path)
+        except OSError:
+            pass
+        os.rmdir(self.tempdir)
+        ProcessTestExecutor.teardown(self)
+
+    def screenshot(self, test, viewport_size, dpi):
+        full_url = self.test_url(test)
+
+        with TempFilename(self.tempdir) as output_path:
+            debug_args, command = browser_command(
+                self.binary,
+                [
+                    "--hard-fail", "--exit",
+                    "-u", "Servo/wptrunner",
+                    "-Z", "disable-text-aa,load-webfonts-synchronously,replace-surrogates",
+                    "--output=%s" % output_path, full_url
+                ] + self.browser.binary_args,
+                self.debug_info)
+
+            for stylesheet in self.browser.user_stylesheets:
+                command += ["--user-stylesheet", stylesheet]
+
+            for pref, value in test.environment.get('prefs', {}).iteritems():
+                command += ["--pref", "%s=%s" % (pref, value)]
+
+            command += ["--resolution", viewport_size or "800x600"]
+
+            if self.browser.ca_certificate_path:
+                command += ["--certificate-path", self.browser.ca_certificate_path]
+
+            if dpi:
+                command += ["--device-pixel-ratio", dpi]
+
+            # Run ref tests in headless mode
+            command += ["-z"]
+
+            self.command = debug_args + command
+
+            env = os.environ.copy()
+            env["HOST_FILE"] = self.hosts_path
+            env["RUST_BACKTRACE"] = "1"
+
+            if not self.interactive:
+                self.proc = ProcessHandler(self.command,
+                                           processOutputLine=[self.on_output],
+                                           env=env)
+
+                try:
+                    self.proc.run()
+                    timeout = test.timeout * self.timeout_multiplier + 5
+                    rv = self.proc.wait(timeout=timeout)
+                except KeyboardInterrupt:
+                    self.proc.kill()
+                    raise
+            else:
+                self.proc = subprocess.Popen(self.command,
+                                             env=env)
+                try:
+                    rv = self.proc.wait()
+                except KeyboardInterrupt:
+                    self.proc.kill()
+                    raise
+
+            if rv is None:
+                self.proc.kill()
+                return False, ("EXTERNAL-TIMEOUT", None)
+
+            if rv != 0 or not os.path.exists(output_path):
+                return False, ("CRASH", None)
+
+            with open(output_path, "rb") as f:
+                # Might need to strip variable headers or something here
+                data = f.read()
+                return True, base64.b64encode(data)
+
+    def do_test(self, test):
+        result = self.implementation.run_test(test)
+
+        return self.convert_result(test, result)
+
+    def on_output(self, line):
+        line = line.decode("utf8", "replace")
+        if self.interactive:
+            print line
+        else:
+            self.logger.process_output(self.proc.pid,
+                                       line,
+                                       " ".join(self.command))
+
+
+class ServoWdspecProtocol(Protocol):
+    def __init__(self, executor, browser):
+        self.do_delayed_imports()
+        Protocol.__init__(self, executor, browser)
+        self.session = None
+        self.server = None
+
+    def setup(self, runner):
+        try:
+            self.server = ServoDriverServer(self.logger, binary=self.browser.binary, binary_args=self.browser.binary_args)
+            self.server.start(block=False)
+            self.logger.info(
+                "WebDriver HTTP server listening at %s" % self.server.url)
+
+            self.logger.info(
+                "Establishing new WebDriver session with %s" % self.server.url)
+            self.session = webdriver.Session(
+                self.server.host, self.server.port, self.server.base_path)
+        except Exception:
+            self.logger.error(traceback.format_exc())
+            self.executor.runner.send_message("init_failed")
+        else:
+            self.executor.runner.send_message("init_succeeded")
+
+    def teardown(self):
+        if self.server is not None:
+            try:
+                if self.session.session_id is not None:
+                    self.session.end()
+            except Exception:
+                pass
+            if self.server.is_alive:
+                self.server.stop()
+
+    @property
+    def is_alive(self):
+        conn = httplib.HTTPConnection(self.server.host, self.server.port)
+        conn.request("HEAD", self.server.base_path + "invalid")
+        res = conn.getresponse()
+        return res.status == 404
+
+    def do_delayed_imports(self):
+        global pytestrunner, webdriver
+        from . import pytestrunner
+        import webdriver
+
+
+class ServoWdspecExecutor(WdspecExecutor):
+    def __init__(self, browser, server_config,
+                 timeout_multiplier=1, close_after_done=True, debug_info=None,
+                 **kwargs):
+        WdspecExecutor.__init__(self, browser, server_config,
+                                timeout_multiplier=timeout_multiplier,
+                                debug_info=debug_info)
+        self.protocol = ServoWdspecProtocol(self, browser)
+
+    def is_alive(self):
+        return self.protocol.is_alive
+
+    def on_environment_change(self, new_environment):
+        pass
+
+    def do_test(self, test):
+        timeout = test.timeout * self.timeout_multiplier + extra_timeout
+
+        success, data = WdspecRun(self.do_wdspec,
+                                  self.protocol.session,
+                                  test.path,
+                                  timeout).run()
+
+        if success:
+            return self.convert_result(test, data)
+
+        return (test.result_cls(*data), [])
+
+    def do_wdspec(self, session, path, timeout):
+        harness_result = ("OK", None)
+        # pytestrunner.run() takes connection parameters rather than a live
+        # session object (see pytestrunner/runner.py); this assumes the
+        # wdclient Session exposes host, port and capabilities.
+        session_config = {"host": session.host,
+                          "port": session.port,
+                          "capabilities": session.capabilities}
+        subtest_results = pytestrunner.run(path,
+                                           self.server_config,
+                                           session_config,
+                                           timeout=timeout)
+        return (harness_result, subtest_results)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorservodriver.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorservodriver.py
new file mode 100644
index 0000000..af09da6
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/executorservodriver.py
@@ -0,0 +1,259 @@
+import json
+import os
+import socket
+import threading
+import time
+import traceback
+
+from .base import (Protocol,
+                   RefTestExecutor,
+                   RefTestImplementation,
+                   TestharnessExecutor,
+                   strip_server)
+from ..testrunner import Stop
+
+webdriver = None
+
+here = os.path.dirname(__file__)
+
+extra_timeout = 5  # seconds
+
+
+def do_delayed_imports():
+    global webdriver
+    import webdriver
+
+
+class ServoWebDriverProtocol(Protocol):
+    def __init__(self, executor, browser, capabilities, **kwargs):
+        do_delayed_imports()
+        Protocol.__init__(self, executor, browser)
+        self.capabilities = capabilities
+        self.host = browser.webdriver_host
+        self.port = browser.webdriver_port
+        self.session = None
+
+    def setup(self, runner):
+        """Connect to browser via WebDriver."""
+        self.runner = runner
+
+        url = "http://%s:%d" % (self.host, self.port)
+        session_started = False
+        try:
+            self.session = webdriver.Session(self.host, self.port,
+                extension=webdriver.servo.ServoCommandExtensions)
+            self.session.start()
+        except:
+            self.logger.warning(
+                "Connecting with WebDriver failed:\n%s" % traceback.format_exc())
+        else:
+            self.logger.debug("session started")
+            session_started = True
+
+        if not session_started:
+            self.logger.warning("Failed to connect via WebDriver")
+            self.executor.runner.send_message("init_failed")
+        else:
+            self.executor.runner.send_message("init_succeeded")
+
+    def teardown(self):
+        self.logger.debug("Hanging up on WebDriver session")
+        try:
+            self.session.end()
+        except:
+            pass
+
+    def is_alive(self):
+        try:
+            # Get a simple property over the connection
+            self.session.window_handle
+        # TODO what exception?
+        except Exception:
+            return False
+        return True
+
+    def after_connect(self):
+        pass
+
+    def wait(self):
+        while True:
+            try:
+                self.session.execute_async_script("")
+            except webdriver.TimeoutException:
+                pass
+            except (socket.timeout, IOError):
+                break
+            except Exception as e:
+                self.logger.error(traceback.format_exc())
+                break
+
+    def on_environment_change(self, old_environment, new_environment):
+        # Unset all the old prefs
+        self.session.extension.reset_prefs(*old_environment.get("prefs", {}).keys())
+        self.session.extension.set_prefs(new_environment.get("prefs", {}))
+
+
+class ServoWebDriverRun(object):
+    def __init__(self, func, session, url, timeout):
+        self.func = func
+        self.result = None
+        self.session = session
+        self.url = url
+        self.timeout = timeout
+        self.result_flag = threading.Event()
+
+    def run(self):
+        executor = threading.Thread(target=self._run)
+        executor.start()
+
+        flag = self.result_flag.wait(self.timeout + extra_timeout)
+        if self.result is None:
+            assert not flag
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
+
+        return self.result
+
+    def _run(self):
+        try:
+            self.result = True, self.func(self.session, self.url, self.timeout)
+        except webdriver.TimeoutException:
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
+        except (socket.timeout, IOError):
+            self.result = False, ("CRASH", None)
+        except Exception as e:
+            message = getattr(e, "message", "")
+            if message:
+                message += "\n"
+            message += traceback.format_exc()
+            self.result = False, ("ERROR", message)
+        finally:
+            self.result_flag.set()
+
+
+def timeout_func(timeout):
+    if timeout:
+        t0 = time.time()
+        return lambda: time.time() - t0 > timeout + extra_timeout
+    else:
+        return lambda: False
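+
+# e.g. timeout_func(5)() stays False until 5s + extra_timeout have elapsed;
+# timeout_func(None)() is always False (no timeout).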
+
+
+class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
+    def __init__(self, browser, server_config, timeout_multiplier=1,
+                 close_after_done=True, capabilities=None, debug_info=None,
+                 **kwargs):
+        TestharnessExecutor.__init__(self, browser, server_config,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_info=debug_info)
+        self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
+        with open(os.path.join(here, "testharness_servodriver.js")) as f:
+            self.script = f.read()
+        self.timeout = None
+
+    def on_protocol_change(self, new_protocol):
+        pass
+
+    def is_alive(self):
+        return self.protocol.is_alive()
+
+    def do_test(self, test):
+        url = self.test_url(test)
+
+        timeout = test.timeout * self.timeout_multiplier + extra_timeout
+
+        if timeout != self.timeout:
+            try:
+                self.protocol.session.timeouts.script = timeout
+                self.timeout = timeout
+            except IOError:
+                self.logger.error("Lost webdriver connection")
+                return Stop
+
+        success, data = ServoWebDriverRun(self.do_testharness,
+                                          self.protocol.session,
+                                          url,
+                                          timeout).run()
+
+        if success:
+            return self.convert_result(test, data)
+
+        return (test.result_cls(*data), [])
+
+    def do_testharness(self, session, url, timeout):
+        session.url = url
+        result = json.loads(
+            session.execute_async_script(
+                self.script % {"abs_url": url,
+                               "url": strip_server(url),
+                               "timeout_multiplier": self.timeout_multiplier,
+                               "timeout": timeout * 1000}))
+        # Prevent leaking every page in history until Servo develops a more sane
+        # page cache
+        session.back()
+        return result
+
+
+class TimeoutError(Exception):
+    pass
+
+
+class ServoWebDriverRefTestExecutor(RefTestExecutor):
+    def __init__(self, browser, server_config, timeout_multiplier=1,
+                 screenshot_cache=None, capabilities=None, debug_info=None,
+                 **kwargs):
+        """Selenium WebDriver-based executor for reftests"""
+        RefTestExecutor.__init__(self,
+                                 browser,
+                                 server_config,
+                                 screenshot_cache=screenshot_cache,
+                                 timeout_multiplier=timeout_multiplier,
+                                 debug_info=debug_info)
+        self.protocol = ServoWebDriverProtocol(self, browser,
+                                               capabilities=capabilities)
+        self.implementation = RefTestImplementation(self)
+        self.timeout = None
+        with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
+            self.wait_script = f.read()
+
+    def is_alive(self):
+        return self.protocol.is_alive()
+
+    def do_test(self, test):
+        try:
+            result = self.implementation.run_test(test)
+            return self.convert_result(test, result)
+        except IOError:
+            return test.result_cls("CRASH", None), []
+        except TimeoutError:
+            return test.result_cls("TIMEOUT", None), []
+        except Exception as e:
+            message = getattr(e, "message", "")
+            if message:
+                message += "\n"
+            message += traceback.format_exc()
+            return test.result_cls("ERROR", message), []
+
+    def screenshot(self, test, viewport_size, dpi):
+        # https://github.com/w3c/wptrunner/issues/166
+        assert viewport_size is None
+        assert dpi is None
+
+        timeout = (test.timeout * self.timeout_multiplier + extra_timeout
+                   if self.debug_info is None else None)
+
+        if self.timeout != timeout:
+            try:
+                self.protocol.session.timeouts.script = timeout
+                self.timeout = timeout
+            except IOError:
+                self.logger.error("Lost webdriver connection")
+                return Stop
+
+        return ServoWebDriverRun(self._screenshot,
+                                 self.protocol.session,
+                                 self.test_url(test),
+                                 timeout).run()
+
+    def _screenshot(self, session, url, timeout):
+        session.url = url
+        session.execute_async_script(self.wait_script)
+        return session.screenshot()
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/process.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/process.py
new file mode 100644
index 0000000..fb8c17a
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/process.py
@@ -0,0 +1,20 @@
+from .base import TestExecutor
+
+
+class ProcessTestExecutor(TestExecutor):
+    def __init__(self, *args, **kwargs):
+        TestExecutor.__init__(self, *args, **kwargs)
+        self.binary = self.browser.binary
+        self.interactive = (False if self.debug_info is None
+                            else self.debug_info.interactive)
+
+    def setup(self, runner):
+        self.runner = runner
+        self.runner.send_message("init_succeeded")
+        return True
+
+    def is_alive(self):
+        return True
+
+    def do_test(self, test):
+        raise NotImplementedError
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py
new file mode 100644
index 0000000..a92b3a8
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py
@@ -0,0 +1 @@
+from .runner import run
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/pytestrunner/runner.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/pytestrunner/runner.py
new file mode 100644
index 0000000..bc37384
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/pytestrunner/runner.py
@@ -0,0 +1,116 @@
+"""Provides interface to deal with pytest.
+
+Usage::
+
+    session_config = {"host": "127.0.0.1",
+                      "port": "4444",
+                      "capabilities": {}}
+    harness_result = ("OK", None)
+    subtest_results = pytestrunner.run("/path/to/test",
+                                       server_config,
+                                       session_config)
+    return (harness_result, subtest_results)
+"""
+
+import errno
+import json
+import os
+import shutil
+import tempfile
+
+
+pytest = None
+
+
+def do_delayed_imports():
+    global pytest
+    import pytest
+
+
+def run(path, server_config, session_config, timeout=0):
+    """Run Python test at ``path`` in pytest.  The provided ``session``
+    is exposed as a fixture available in the scope of the test functions.
+
+    :param path: Path to the test file.
+    :param session_config: dictionary of host, port,capabilities parameters
+    to pass through to the webdriver session
+    :param timeout: Duration before interrupting potentially hanging
+        tests.  If 0, there is no timeout.
+
+    :returns: List of subtest results, which are tuples of (test id,
+        status, message, stacktrace).
+    """
+
+    if pytest is None:
+        do_delayed_imports()
+
+    recorder = SubtestResultRecorder()
+
+    os.environ["WD_HOST"] = session_config["host"]
+    os.environ["WD_PORT"] = str(session_config["port"])
+    os.environ["WD_CAPABILITIES"] = json.dumps(session_config["capabilities"])
+    os.environ["WD_SERVER_CONFIG"] = json.dumps(server_config)
+
+    plugins = [recorder]
+
+    # TODO(ato): Deal with timeouts
+
+    with TemporaryDirectory() as cache:
+        pytest.main(["--strict",  # turn warnings into errors
+                     "--verbose",  # show each individual subtest
+                     "--capture", "no",  # enable stdout/stderr from tests
+                     "--basetemp", cache,  # temporary directory
+                     "-p", "no:mozlog",
+                     path],
+                    plugins=plugins)
+
+    return recorder.results
+
+
+class SubtestResultRecorder(object):
+    def __init__(self):
+        self.results = []
+
+    def pytest_runtest_logreport(self, report):
+        if report.passed and report.when == "call":
+            self.record_pass(report)
+        elif report.failed:
+            if report.when != "call":
+                self.record_error(report)
+            else:
+                self.record_fail(report)
+        elif report.skipped:
+            self.record_skip(report)
+
+    def record_pass(self, report):
+        self.record(report.nodeid, "PASS")
+
+    def record_fail(self, report):
+        self.record(report.nodeid, "FAIL", stack=report.longrepr)
+
+    def record_error(self, report):
+        # pytest_runtest_logreport only routes setup/teardown failures here,
+        # so report.when is never "call"
+        message = "%s error" % report.when
+        self.record(report.nodeid, "ERROR", message, report.longrepr)
+
+    def record_skip(self, report):
+        self.record(report.nodeid, "ERROR",
+                    "In-test skip decorators are disallowed, "
+                    "please use WPT metadata to ignore tests.")
+
+    def record(self, test, status, message=None, stack=None):
+        if stack is not None:
+            stack = str(stack)
+        new_result = (test, status, message, stack)
+        self.results.append(new_result)
+
+
+class TemporaryDirectory(object):
+    def __enter__(self):
+        self.path = tempfile.mkdtemp(prefix="pytest-")
+        return self.path
+
+    def __exit__(self, *args):
+        try:
+            shutil.rmtree(self.path)
+        except OSError as e:
+            # no such file or directory
+            if e.errno != errno.ENOENT:
+                raise
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest-wait_marionette.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest-wait_marionette.js
new file mode 100644
index 0000000..c226027
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest-wait_marionette.js
@@ -0,0 +1,17 @@
+function test(x) {
+  if (!root.classList.contains("reftest-wait")) {
+    observer.disconnect();
+    marionetteScriptFinished();
+  }
+}
+
+var root = document.documentElement;
+var observer = new MutationObserver(test);
+
+observer.observe(root, {attributes: true});
+
+if (document.readyState != "complete") {
+  onload = test;
+} else {
+  test();
+}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest-wait_webdriver.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest-wait_webdriver.js
new file mode 100644
index 0000000..c1cc649
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest-wait_webdriver.js
@@ -0,0 +1,44 @@
+var callback = arguments[arguments.length - 1];
+
+function root_wait() {
+  if (!root.classList.contains("reftest-wait")) {
+    observer.disconnect();
+
+    if (Document.prototype.hasOwnProperty("fonts")) {
+      document.fonts.ready.then(ready_for_screenshot);
+    } else {
+      // This might take the screenshot too early, depending on whether the
+      // load event is blocked on fonts being loaded. See:
+      // https://github.com/w3c/csswg-drafts/issues/1088
+      ready_for_screenshot();
+    }
+  }
+}
+
+function ready_for_screenshot() {
+  // As of 2017-04-05, the Chromium web browser exhibits a rendering bug
+  // (https://bugs.chromium.org/p/chromium/issues/detail?id=708757) that
+  // produces instability during screen capture. The following use of
+  // `requestAnimationFrame` is intended as a short-term workaround, though
+  // it is not guaranteed to resolve the issue.
+  //
+  // For further detail, see:
+  // https://github.com/jugglinmike/chrome-screenshot-race/issues/1
+
+  requestAnimationFrame(function() {
+    requestAnimationFrame(function() {
+      callback();
+    });
+  });
+}
+
+var root = document.documentElement;
+var observer = new MutationObserver(root_wait);
+
+observer.observe(root, {attributes: true});
+
+if (document.readyState != "complete") {
+    onload = root_wait;
+} else {
+    root_wait();
+}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest.js
new file mode 100644
index 0000000..5bd5c60
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/reftest.js
@@ -0,0 +1 @@
+var win = window.open("about:blank", "test", "width=600,height=600");
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_marionette.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_marionette.js
new file mode 100644
index 0000000..e2b70a0
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_marionette.js
@@ -0,0 +1,31 @@
+window.wrappedJSObject.timeout_multiplier = %(timeout_multiplier)d;
+window.wrappedJSObject.explicit_timeout = %(explicit_timeout)d;
+
+window.wrappedJSObject.addEventListener("message", function listener(event) {
+    if (event.data.type != "complete") {
+        return;
+    }
+    window.wrappedJSObject.removeEventListener("message", listener);
+    clearTimeout(timer);
+    var tests = event.data.tests;
+    var status = event.data.status;
+
+    var subtest_results = tests.map(function (x) {
+        return [x.name, x.status, x.message, x.stack];
+    });
+
+    marionetteScriptFinished(["%(url)s",
+                              status.status,
+                              status.message,
+                              status.stack,
+                              subtest_results]);
+}, false);
+
+window.wrappedJSObject.win = window.open("%(abs_url)s", "%(window_id)s");
+
+var timer = null;
+if (%(timeout)s) {
+    timer = setTimeout(function() {
+        window.wrappedJSObject.win.timeout();
+    }, %(timeout)s);
+}
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_servodriver.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_servodriver.js
new file mode 100644
index 0000000..d731cc0
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_servodriver.js
@@ -0,0 +1,2 @@
+window.__wd_results_callback__ = arguments[arguments.length - 1];
+window.__wd_results_timer__ = setTimeout(timeout, %(timeout)s);
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_webdriver.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_webdriver.js
new file mode 100644
index 0000000..f5cbff9
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/executors/testharness_webdriver.js
@@ -0,0 +1,29 @@
+var callback = arguments[arguments.length - 1];
+window.timeout_multiplier = %(timeout_multiplier)d;
+
+window.addEventListener("message", function f(event) {
+  if (event.data.type != "complete") {
+    return;
+  }
+  window.removeEventListener("message", f);
+
+  var tests = event.data.tests;
+  var status = event.data.status;
+
+  var subtest_results = tests.map(function(x) {
+    return [x.name, x.status, x.message, x.stack];
+  });
+  clearTimeout(timer);
+  callback(["%(url)s",
+            status.status,
+            status.message,
+            status.stack,
+            subtest_results]);
+}, false);
+
+window.win = window.open("%(abs_url)s", "%(window_id)s");
+
+var timer = setTimeout(function() {
+  window.win.timeout();
+  window.win.close();
+}, %(timeout)s);
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/expected.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/expected.py
new file mode 100644
index 0000000..f06abb9
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/expected.py
@@ -0,0 +1,14 @@
+import os
+
+
+def expected_path(metadata_path, test_path):
+    """Path to the expectation data file for a given test path.
+
+    This is defined as metadata_path + relative_test_path + .ini
+
+    :param metadata_path: Path to the root of the metadata directory
+    :param test_path: Relative path to the test file from the test root
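+
+    e.g. expected_path("/meta", "dom/interfaces.html") gives
+    "/meta/dom/interfaces.html.ini" on POSIX systems.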
+    """
+    args = list(test_path.split("/"))
+    args[-1] += ".ini"
+    return os.path.join(metadata_path, *args)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/formatters.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/formatters.py
new file mode 100755
index 0000000..0e888ce
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/formatters.py
@@ -0,0 +1,54 @@
+import json
+
+from mozlog.structured.formatters.base import BaseFormatter
+
+
+class WptreportFormatter(BaseFormatter):
+    """Formatter that produces results in the format that wpreport expects."""
+
+    def __init__(self):
+        self.raw_results = {}
+
+    def suite_end(self, data):
+        results = {}
+        results["results"] = []
+        for test_name in self.raw_results:
+            result = {"test": test_name}
+            result.update(self.raw_results[test_name])
+            results["results"].append(result)
+        return json.dumps(results)
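+
+    # The serialised output has the shape (illustrative):
+    #   {"results": [{"test": <test id>, "status": ..., "message": ...,
+    #                 "subtests": [{"name": ..., "status": ...,
+    #                               "message": ...}]}]}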
+
+    def find_or_create_test(self, data):
+        test_name = data["test"]
+        if test_name not in self.raw_results:
+            self.raw_results[test_name] = {
+                "subtests": [],
+                "status": "",
+                "message": None
+            }
+        return self.raw_results[test_name]
+
+    def create_subtest(self, data):
+        test = self.find_or_create_test(data)
+        subtest_name = data["subtest"]
+
+        subtest = {
+            "name": subtest_name,
+            "status": "",
+            "message": None
+        }
+        test["subtests"].append(subtest)
+
+        return subtest
+
+    def test_status(self, data):
+        subtest = self.create_subtest(data)
+        subtest["status"] = data["status"]
+        if "message" in data:
+            subtest["message"] = data["message"]
+
+    def test_end(self, data):
+        test = self.find_or_create_test(data)
+        test["status"] = data["status"]
+        if "message" in data:
+            test["message"] = data["message"]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/hosts.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/hosts.py
new file mode 100644
index 0000000..915c17f
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/hosts.py
@@ -0,0 +1,100 @@
+from __future__ import unicode_literals
+
+
+class HostsLine(object):
+    def __init__(self, ip_address, canonical_hostname, aliases=None, comment=None):
+        self.ip_address = ip_address
+        self.canonical_hostname = canonical_hostname
+        self.aliases = aliases if aliases is not None else []
+        self.comment = comment
+        if self.ip_address is None:
+            assert self.canonical_hostname is None
+            assert not self.aliases
+            assert self.comment is not None
+
+    @classmethod
+    def from_string(cls, line):
+        if not line.strip():
+            return
+
+        line = line.strip()
+
+        ip_address = None
+        canonical_hostname = None
+        aliases = []
+        comment = None
+
+        comment_parts = line.split("#", 1)
+        if len(comment_parts) > 1:
+            comment = comment_parts[1]
+
+        data = comment_parts[0].strip()
+
+        if data:
+            fields = data.split()
+            if len(fields) < 2:
+                raise ValueError("Invalid hosts line")
+
+            ip_address = fields[0]
+            canonical_hostname = fields[1]
+            aliases = fields[2:]
+
+        return cls(ip_address, canonical_hostname, aliases, comment)
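+
+    # e.g. from_string("127.0.0.1 web-platform.test www.web-platform.test # wpt")
+    # gives ip_address="127.0.0.1", canonical_hostname="web-platform.test",
+    # aliases=["www.web-platform.test"] and comment=" wpt".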
+
+
+class HostsFile(object):
+    def __init__(self):
+        self.data = []
+        self.by_hostname = {}
+
+    def set_host(self, host):
+        if host.canonical_hostname is None:
+            self.data.append(host)
+        elif host.canonical_hostname in self.by_hostname:
+            old_host = self.by_hostname[host.canonical_hostname]
+            old_host.ip_address = host.ip_address
+            old_host.aliases = host.aliases
+            old_host.comment = host.comment
+        else:
+            self.data.append(host)
+            self.by_hostname[host.canonical_hostname] = host
+
+    @classmethod
+    def from_file(cls, f):
+        rv = cls()
+        for line in f:
+            host = HostsLine.from_string(line)
+            if host is not None:
+                rv.set_host(host)
+        return rv
+
+    def to_string(self):
+        field_widths = [0, 0]
+        for line in self.data:
+            if line.ip_address is not None:
+                field_widths[0] = max(field_widths[0], len(line.ip_address))
+                field_widths[1] = max(field_widths[1], len(line.canonical_hostname))
+
+        lines = []
+
+        for host in self.data:
+            line = ""
+            if host.ip_address is not None:
+                ip_string = host.ip_address.ljust(field_widths[0])
+                hostname_str = host.canonical_hostname
+                if host.aliases:
+                    hostname_str = "%s %s" % (hostname_str.ljust(field_widths[1]),
+                                              " ".join(host.aliases))
+                line = "%s %s" % (ip_string, hostname_str)
+            if host.comment:
+                if line:
+                    line += " "
+                line += "#%s" % host.comment
+            lines.append(line)
+
+        lines.append("")
+
+        return "\n".join(lines)
+
+    def to_file(self, f):
+        f.write(self.to_string().encode("utf8"))
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestexpected.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestexpected.py
new file mode 100644
index 0000000..5d57b70
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestexpected.py
@@ -0,0 +1,262 @@
+"""Manifest structure used to store expected results of a test.
+
+Each manifest file is represented by an ExpectedManifest that
+has one or more TestNode children, one per test in the manifest.
+Each TestNode has zero or more SubtestNode children, one for each
+known subtest of the test.
+"""
+
+import os
+import urlparse
+
+from wptmanifest.backends import static
+from wptmanifest.backends.static import ManifestItem
+
+import expected
+
+
+def data_cls_getter(output_node, visited_node):
+    # visited_node is intentionally unused
+    if output_node is None:
+        return ExpectedManifest
+    if isinstance(output_node, ExpectedManifest):
+        return TestNode
+    if isinstance(output_node, TestNode):
+        return SubtestNode
+    raise ValueError
+
+
+def bool_prop(name, node):
+    """Boolean property"""
+    try:
+        return node.get(name)
+    except KeyError:
+        return None
+
+
+def tags(node):
+    """Set of tags that have been applied to the test"""
+    try:
+        value = node.get("tags")
+        if isinstance(value, (str, unicode)):
+            return {value}
+        return set(value)
+    except KeyError:
+        return set()
+
+
+def prefs(node):
+    def value(ini_value):
+        if isinstance(ini_value, (str, unicode)):
+            return tuple(ini_value.split(":", 1))
+        else:
+            return (ini_value, None)
+
+    try:
+        node_prefs = node.get("prefs")
+        if isinstance(node_prefs, (str, unicode)):
+            # a single pref is given as one string rather than a list
+            rv = dict([value(node_prefs)])
+        else:
+            rv = dict(value(item) for item in node_prefs)
+    except KeyError:
+        rv = {}
+    return rv
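+
+# e.g. a manifest entry ``prefs: ["dom.send_after_paint_to_content:true"]``
+# yields {"dom.send_after_paint_to_content": "true"}.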
+
+
+class ExpectedManifest(ManifestItem):
+    def __init__(self, name, test_path, url_base):
+        """Object representing all the tests in a particular manifest
+
+        :param name: Name of the AST Node associated with this object.
+                     Should always be None since this should always be associated with
+                     the root node of the AST.
+        :param test_path: Path of the test file associated with this manifest.
+        :param url_base: Base url for serving the tests in this manifest
+        """
+        if name is not None:
+            raise ValueError("ExpectedManifest should represent the root node")
+        if test_path is None:
+            raise ValueError("ExpectedManifest requires a test path")
+        if url_base is None:
+            raise ValueError("ExpectedManifest requires a base url")
+        ManifestItem.__init__(self, name)
+        self.child_map = {}
+        self.test_path = test_path
+        self.url_base = url_base
+
+    def append(self, child):
+        """Add a test to the manifest"""
+        ManifestItem.append(self, child)
+        self.child_map[child.id] = child
+
+    def _remove_child(self, child):
+        del self.child_map[child.id]
+        ManifestItem.remove_child(self, child)
+        assert len(self.child_map) == len(self.children)
+
+    def get_test(self, test_id):
+        """Get a test from the manifest by ID
+
+        :param test_id: ID of the test to return."""
+        return self.child_map.get(test_id)
+
+    @property
+    def url(self):
+        return urlparse.urljoin(self.url_base,
+                                "/".join(self.test_path.split(os.path.sep)))
+
+    @property
+    def disabled(self):
+        return bool_prop("disabled", self)
+
+    @property
+    def restart_after(self):
+        return bool_prop("restart-after", self)
+
+    @property
+    def leaks(self):
+        return bool_prop("leaks", self)
+
+    @property
+    def tags(self):
+        return tags(self)
+
+    @property
+    def prefs(self):
+        return prefs(self)
+
+
+class DirectoryManifest(ManifestItem):
+    @property
+    def disabled(self):
+        return bool_prop("disabled", self)
+
+    @property
+    def restart_after(self):
+        return bool_prop("restart-after", self)
+
+    @property
+    def leaks(self):
+        return bool_prop("leaks", self)
+
+    @property
+    def tags(self):
+        return tags(self)
+
+    @property
+    def prefs(self):
+        return prefs(self)
+
+
+class TestNode(ManifestItem):
+    def __init__(self, name):
+        """Tree node associated with a particular test in a manifest
+
+        :param name: name of the test"""
+        assert name is not None
+        ManifestItem.__init__(self, name)
+        self.updated_expected = []
+        self.new_expected = []
+        self.subtests = {}
+        self.default_status = None
+        self._from_file = True
+
+    @property
+    def is_empty(self):
+        required_keys = set(["type"])
+        if set(self._data.keys()) != required_keys:
+            return False
+        return all(child.is_empty for child in self.children)
+
+    @property
+    def test_type(self):
+        return self.get("type")
+
+    @property
+    def id(self):
+        return urlparse.urljoin(self.parent.url, self.name)
+
+    @property
+    def disabled(self):
+        return bool_prop("disabled", self)
+
+    @property
+    def restart_after(self):
+        return bool_prop("restart-after", self)
+
+    @property
+    def leaks(self):
+        return bool_prop("leaks", self)
+
+    @property
+    def tags(self):
+        return tags(self)
+
+    @property
+    def prefs(self):
+        return prefs(self)
+
+    def append(self, node):
+        """Add a subtest to the current test
+
+        :param node: AST Node associated with the subtest"""
+        child = ManifestItem.append(self, node)
+        self.subtests[child.name] = child
+
+    def get_subtest(self, name):
+        """Get the SubtestNode corresponding to a particular subtest, by name
+
+        :param name: Name of the node to return"""
+        if name in self.subtests:
+            return self.subtests[name]
+        return None
+
+
+class SubtestNode(TestNode):
+    def __init__(self, name):
+        """Tree node associated with a particular subtest in a manifest
+
+        :param name: name of the subtest"""
+        TestNode.__init__(self, name)
+
+    @property
+    def is_empty(self):
+        if self._data:
+            return False
+        return True
+
+
+def get_manifest(metadata_root, test_path, url_base, run_info):
+    """Get the ExpectedManifest for a particular test path, or None if there is no
+    metadata stored for that test path.
+
+    :param metadata_root: Absolute path to the root of the metadata directory
+    :param test_path: Path to the test(s) relative to the test root
+    :param url_base: Base url for serving the tests in this manifest
+    :param run_info: Dictionary of properties of the test run for which the expectation
+                     values should be computed.
+    """
+    manifest_path = expected.expected_path(metadata_root, test_path)
+    try:
+        with open(manifest_path) as f:
+            return static.compile(f,
+                                  run_info,
+                                  data_cls_getter=data_cls_getter,
+                                  test_path=test_path,
+                                  url_base=url_base)
+    except IOError:
+        return None
+
+
+def get_dir_manifest(path, run_info):
+    """Get the ExpectedManifest for a particular test path, or None if there is no
+    metadata stored for that test path.
+
+    :param path: Full path to the ini file
+    :param run_info: Dictionary of properties of the test run for which the expectation
+                     values should be computed.
+    """
+    try:
+        with open(path) as f:
+            return static.compile(f,
+                                  run_info,
+                                  data_cls_getter=lambda x,y: DirectoryManifest)
+    except IOError:
+        return None
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestinclude.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestinclude.py
new file mode 100644
index 0000000..1b53a42
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestinclude.py
@@ -0,0 +1,151 @@
+"""Manifest structure used to store paths that should be included in a test run.
+
+The manifest is represented by a tree of IncludeManifest objects, the root
+representing the file and each subnode representing a subdirectory that should
+be included or excluded.
+"""
+import glob
+import os
+import urlparse
+
+from wptmanifest.node import DataNode
+from wptmanifest.backends import conditional
+from wptmanifest.backends.conditional import ManifestItem
+
+
+class IncludeManifest(ManifestItem):
+    def __init__(self, node):
+        """Node in a tree structure representing the paths
+        that should be included or excluded from the test run.
+
+        :param node: AST Node corresponding to this Node.
+        """
+        ManifestItem.__init__(self, node)
+        self.set("skip", "False")
+        self.child_map = {}
+
+    @classmethod
+    def create(cls):
+        """Create an empty IncludeManifest tree"""
+        node = DataNode(None)
+        return cls(node)
+
+    def append(self, child):
+        ManifestItem.append(self, child)
+        self.child_map[child.name] = child
+        assert len(self.child_map) == len(self.children)
+
+    def include(self, test):
+        """Return a boolean indicating whether a particular test should be
+        included in a test run, based on the IncludeManifest tree rooted on
+        this object.
+
+        :param test: The test object"""
+        path_components = self._get_components(test.url)
+        return self._include(test, path_components)
+
+    def _include(self, test, path_components):
+        if path_components:
+            next_path_part = path_components.pop()
+            if next_path_part in self.child_map:
+                return self.child_map[next_path_part]._include(test, path_components)
+
+        node = self
+        while node:
+            try:
+                skip_value = self.get("skip", {"test_type": test.item_type}).lower()
+                assert skip_value in ("true", "false")
+                return skip_value != "true"
+            except KeyError:
+                if node.parent is not None:
+                    node = node.parent
+                else:
+                    # Include by default
+                    return True
+
+    def _get_components(self, url):
+        rv = []
+        url_parts = urlparse.urlsplit(url)
+        variant = ""
+        if url_parts.query:
+            variant += "?" + url_parts.query
+        if url_parts.fragment:
+            variant += "#" + url_parts.fragment
+        if variant:
+            rv.append(variant)
+        rv.extend([item for item in reversed(url_parts.path.split("/")) if item])
+        return rv
+
+    def _add_rule(self, test_manifests, url, direction):
+        maybe_path = os.path.join(os.path.abspath(os.curdir), url)
+        rest, last = os.path.split(maybe_path)
+        fragment = query = None
+        if "#" in last:
+            last, fragment = last.rsplit("#", 1)
+        if "?" in last:
+            last, query = last.rsplit("?", 1)
+
+        maybe_path = os.path.join(rest, last)
+        paths = glob.glob(maybe_path)
+
+        if paths:
+            urls = []
+            for path in paths:
+                for manifest, data in test_manifests.iteritems():
+                    found = False
+                    rel_path = os.path.relpath(path, data["tests_path"])
+                    iterator = manifest.iterpath if os.path.isfile(path) else manifest.iterdir
+                    for test in iterator(rel_path):
+                        if not hasattr(test, "url"):
+                            continue
+                        # Use a distinct name so the url parameter is not
+                        # clobbered while iterating the manifest
+                        test_url = test.url
+                        if query or fragment:
+                            parsed = urlparse.urlparse(test_url)
+                            if ((query and query != parsed.query) or
+                                (fragment and fragment != parsed.fragment)):
+                                continue
+                        urls.append(test_url)
+                        found = True
+                    if found:
+                        break
+        else:
+            urls = [url]
+
+        assert direction in ("include", "exclude")
+
+        for url in urls:
+            components = self._get_components(url)
+
+            node = self
+            while components:
+                component = components.pop()
+                if component not in node.child_map:
+                    new_node = IncludeManifest(DataNode(component))
+                    node.append(new_node)
+                    new_node.set("skip", node.get("skip", {}))
+
+                node = node.child_map[component]
+
+            skip = False if direction == "include" else True
+            node.set("skip", str(skip))
+
+    def add_include(self, test_manifests, url_prefix):
+        """Add a rule indicating that tests under a url path
+        should be included in test runs
+
+        :param url_prefix: The url prefix to include
+        """
+        return self._add_rule(test_manifests, url_prefix, "include")
+
+    def add_exclude(self, test_manifests, url_prefix):
+        """Add a rule indicating that tests under a url path
+        should be excluded from test runs
+
+        :param url_prefix: The url prefix to exclude
+        """
+        return self._add_rule(test_manifests, url_prefix, "exclude")
+
+
+def get_manifest(manifest_path):
+    with open(manifest_path) as f:
+        return conditional.compile(f, data_cls_getter=lambda x, y: IncludeManifest)
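+
+
+# A minimal include manifest in wptmanifest syntax (paths illustrative;
+# section names are matched against successive components of the test url):
+#
+#   skip: True
+#   [dom]
+#     skip: False
+#
+# would skip everything except tests whose url starts with /dom/.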
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestupdate.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestupdate.py
new file mode 100644
index 0000000..07e623c
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/manifestupdate.py
@@ -0,0 +1,460 @@
+import os
+import urlparse
+from collections import namedtuple, defaultdict
+
+from wptmanifest.node import (DataNode, ConditionalNode, BinaryExpressionNode,
+                              BinaryOperatorNode, VariableNode, StringNode, NumberNode,
+                              UnaryExpressionNode, UnaryOperatorNode, KeyValueNode)
+from wptmanifest.backends import conditional
+from wptmanifest.backends.conditional import ManifestItem
+
+import expected
+
+"""Manifest structure used to update the expected results of a test
+
+Each manifest file is represented by an ExpectedManifest that has one
+or more TestNode children, one per test in the manifest.  Each
+TestNode has zero or more SubtestNode children, one for each known
+subtest of the test.
+
+In these representations, conditional expressions in the manifest are
+not evaluated upfront but stored as Python functions to be evaluated
+at runtime.
+
+When a result for a test is to be updated, set_result on the
+[Sub]TestNode is called to store the new result, alongside the
+existing conditional that the result's run info matched, if any. Once all
+new results are known, coalesce_expected is called to compute the new
+set of results and conditionals. The AST of the underlying parsed manifest
+is updated with the changes, and the result is serialised to a file.
+"""
+
+class ConditionError(Exception):
+    pass
+
+Result = namedtuple("Result", ["run_info", "status"])
+
+
+def data_cls_getter(output_node, visited_node):
+    # visited_node is intentionally unused
+    if output_node is None:
+        return ExpectedManifest
+    elif isinstance(output_node, ExpectedManifest):
+        return TestNode
+    elif isinstance(output_node, TestNode):
+        return SubtestNode
+    else:
+        raise ValueError
+
+
+class ExpectedManifest(ManifestItem):
+    def __init__(self, node, test_path=None, url_base=None, property_order=None,
+                 boolean_properties=None):
+        """Object representing all the tests in a particular manifest
+
+        :param node: AST Node associated with this object. If this is None,
+                     a new AST is created to associate with this manifest.
+        :param test_path: Path of the test file associated with this manifest.
+        :param url_base: Base url for serving the tests in this manifest.
+        :param property_order: List of properties to use in expectation metadata
+                               from most to least significant.
+        :param boolean_properties: Set of properties in property_order that should
+                                   be treated as boolean.
+        """
+        if node is None:
+            node = DataNode(None)
+        ManifestItem.__init__(self, node)
+        self.child_map = {}
+        self.test_path = test_path
+        self.url_base = url_base
+        assert self.url_base is not None
+        self.modified = False
+        self.boolean_properties = boolean_properties
+        self.property_order = property_order
+
+    def append(self, child):
+        ManifestItem.append(self, child)
+        if child.id in self.child_map:
+            print "Warning: Duplicate heading %s" % child.id
+        self.child_map[child.id] = child
+
+    def _remove_child(self, child):
+        del self.child_map[child.id]
+        ManifestItem._remove_child(self, child)
+
+    def get_test(self, test_id):
+        """Return a TestNode by test id, or None if no test matches
+
+        :param test_id: The id of the test to look up"""
+
+        return self.child_map[test_id]
+
+    def has_test(self, test_id):
+        """Boolean indicating whether the current test has a known child test
+        with id test id
+
+        :param test_id: The id of the test to look up"""
+
+        return test_id in self.child_map
+
+    @property
+    def url(self):
+        return urlparse.urljoin(self.url_base,
+                                "/".join(self.test_path.split(os.path.sep)))
+
+
+class TestNode(ManifestItem):
+    def __init__(self, node):
+        """Tree node associated with a particular test in a manifest
+
+        :param node: AST node associated with the test"""
+
+        ManifestItem.__init__(self, node)
+        self.updated_expected = []
+        self.new_expected = []
+        self.subtests = {}
+        self.default_status = None
+        self._from_file = True
+
+    @classmethod
+    def create(cls, test_type, test_id):
+        """Create a TestNode corresponding to a given test
+
+        :param test_type: The type of the test
+        :param test_id: The id of the test"""
+
+        url = test_id
+        name = url.split("/")[-1]
+        node = DataNode(name)
+        self = cls(node)
+
+        self.set("type", test_type)
+        self._from_file = False
+        return self
+
+    @property
+    def is_empty(self):
+        required_keys = set(["type"])
+        if set(self._data.keys()) != required_keys:
+            return False
+        return all(child.is_empty for child in self.children)
+
+    @property
+    def test_type(self):
+        """The type of the test represented by this TestNode"""
+
+        return self.get("type", None)
+
+    @property
+    def id(self):
+        """The id of the test represented by this TestNode"""
+        return urlparse.urljoin(self.parent.url, self.name)
+
+    def disabled(self, run_info):
+        """Boolean indicating whether this test is disabled when run in an
+        environment with the given run_info
+
+        :param run_info: Dictionary of run_info parameters"""
+
+        return self.get("disabled", run_info) is not None
+
+    def set_result(self, run_info, result):
+        """Set the result of the test in a particular run
+
+        :param run_info: Dictionary of run_info parameters corresponding
+                         to this run
+        :param result: Status of the test in this run"""
+
+        if self.default_status is not None:
+            assert self.default_status == result.default_expected
+        else:
+            self.default_status = result.default_expected
+
+        # Add this result to the list of results satisfying
+        # any condition in the list of updated results it matches
+        for (cond, values) in self.updated_expected:
+            if cond(run_info):
+                values.append(Result(run_info, result.status))
+                if result.status != cond.value:
+                    self.root.modified = True
+                break
+        else:
+            # We didn't find a previous value for this
+            self.new_expected.append(Result(run_info, result.status))
+            self.root.modified = True
+
+    def coalesce_expected(self):
+        """Update the underlying manifest AST for this test based on all the
+        added results.
+
+        This will update existing conditionals if they got the same result in
+        all matching runs in the updated results, will delete existing conditionals
+        that get more than one different result in the updated run, and add new
+        conditionals for anything that doesn't match an existing conditional.
+
+        Conditionals not matched by any added result are not changed."""
+
+        final_conditionals = []
+
+        try:
+            unconditional_status = self.get("expected")
+        except KeyError:
+            unconditional_status = self.default_status
+
+        for conditional_value, results in self.updated_expected:
+            if not results:
+                # The conditional didn't match anything in these runs so leave it alone
+                final_conditionals.append(conditional_value)
+            elif all(results[0].status == result.status for result in results):
+                # All the new values for this conditional matched, so update the node
+                result = results[0]
+                if (result.status == unconditional_status and
+                    conditional_value.condition_node is not None):
+                    if "expected" in self:
+                        self.remove_value("expected", conditional_value)
+                else:
+                    conditional_value.value = result.status
+                    final_conditionals.append(conditional_value)
+            elif conditional_value.condition_node is not None:
+                # Blow away the existing condition and rebuild from scratch.
+                # This isn't guaranteed to work if a later conditional also
+                # matches these values, but we can hope, verify that we get
+                # the results we expect, and if not let a human sort it out
+                self.remove_value("expected", conditional_value)
+                self.new_expected.extend(results)
+            elif conditional_value.condition_node is None:
+                self.new_expected.extend(result for result in results
+                                         if result.status != unconditional_status)
+
+        # It is an invariant that nothing in new_expected matches an existing
+        # condition except for the default condition
+
+        if self.new_expected:
+            if all(self.new_expected[0].status == result.status
+                   for result in self.new_expected) and not self.updated_expected:
+                status = self.new_expected[0].status
+                if status != self.default_status:
+                    self.set("expected", status, condition=None)
+                    final_conditionals.append(self._data["expected"][-1])
+            else:
+                try:
+                    conditionals = group_conditionals(
+                        self.new_expected,
+                        property_order=self.root.property_order,
+                        boolean_properties=self.root.boolean_properties)
+                except ConditionError:
+                    print "Conflicting test results for %s, cannot update" % self.root.test_path
+                    return
+                for conditional_node, status in conditionals:
+                    if status != unconditional_status:
+                        self.set("expected", status, condition=conditional_node.children[0])
+                        final_conditionals.append(self._data["expected"][-1])
+
+        if ("expected" in self._data and
+            len(self._data["expected"]) > 0 and
+            self._data["expected"][-1].condition_node is None and
+            self._data["expected"][-1].value == self.default_status):
+
+            self.remove_value("expected", self._data["expected"][-1])
+
+        if ("expected" in self._data and
+            len(self._data["expected"]) == 0):
+            for child in self.node.children:
+                if (isinstance(child, KeyValueNode) and
+                    child.data == "expected"):
+                    child.remove()
+                    break
+
+    def _add_key_value(self, node, values):
+        ManifestItem._add_key_value(self, node, values)
+        if node.data == "expected":
+            self.updated_expected = []
+            for value in values:
+                self.updated_expected.append((value, []))
+
+    def clear_expected(self):
+        """Clear all the expected data for this test and all of its subtests"""
+
+        self.updated_expected = []
+        if "expected" in self._data:
+            for child in self.node.children:
+                if (isinstance(child, KeyValueNode) and
+                    child.data == "expected"):
+                    child.remove()
+                    del self._data["expected"]
+                    break
+
+        for subtest in self.subtests.itervalues():
+            subtest.clear_expected()
+
+    def append(self, node):
+        child = ManifestItem.append(self, node)
+        self.subtests[child.name] = child
+
+    def get_subtest(self, name):
+        """Return a SubtestNode corresponding to a particular subtest of
+        the current test, creating a new one if no subtest with that name
+        already exists.
+
+        :param name: Name of the subtest"""
+
+        if name in self.subtests:
+            return self.subtests[name]
+        else:
+            subtest = SubtestNode.create(name)
+            self.append(subtest)
+            return subtest
+
+
+class SubtestNode(TestNode):
+    def __init__(self, node):
+        assert isinstance(node, DataNode)
+        TestNode.__init__(self, node)
+
+    @classmethod
+    def create(cls, name):
+        node = DataNode(name)
+        self = cls(node)
+        return self
+
+    @property
+    def is_empty(self):
+        if self._data:
+            return False
+        return True
+
+
+def group_conditionals(values, property_order=None, boolean_properties=None):
+    """Given a list of Result objects, return a list of
+    (conditional_node, status) pairs representing the conditional
+    expressions that are required to match each status
+
+    :param values: List of Results
+    :param property_order: List of properties to use in expectation metadata
+                           from most to least significant.
+    :param boolean_properties: Set of properties in property_order that should
+                               be treated as boolean."""
+
+    by_property = defaultdict(set)
+    for run_info, status in values:
+        for prop_name, prop_value in run_info.iteritems():
+            by_property[(prop_name, prop_value)].add(status)
+
+    if property_order is None:
+        property_order = ["debug", "os", "version", "processor", "bits"]
+
+    if boolean_properties is None:
+        boolean_properties = set(["debug"])
+    else:
+        boolean_properties = set(boolean_properties)
+
+    # If we have more than one value, remove any properties that are common
+    # for all the values
+    if len(values) > 1:
+        for key, statuses in by_property.copy().iteritems():
+            if len(statuses) == len(values):
+                del by_property[key]
+        if not by_property:
+            raise ConditionError
+
+    properties = set(item[0] for item in by_property.iterkeys())
+    include_props = []
+
+    for prop in property_order:
+        if prop in properties:
+            include_props.append(prop)
+
+    conditions = {}
+
+    for run_info, status in values:
+        prop_set = tuple((prop, run_info[prop]) for prop in include_props)
+        if prop_set in conditions:
+            continue
+
+        expr = make_expr(prop_set, status, boolean_properties=boolean_properties)
+        conditions[prop_set] = (expr, status)
+
+    return conditions.values()
+
+
+def make_expr(prop_set, status, boolean_properties=None):
+    """Create an AST that returns the value ``status`` given all the
+    properties in prop_set match.
+
+    :param prop_set: tuple of (property name, value) pairs for each
+                     property in this expression and the value it must match
+    :param status: Status on RHS when all the given properties match
+    :param boolean_properties: Set of properties in property_order that should
+                               be treated as boolean.
+    """
+    root = ConditionalNode()
+
+    assert len(prop_set) > 0
+
+    expressions = []
+    for prop, value in prop_set:
+        number_types = (int, float, long)
+        value_cls = (NumberNode
+                     if type(value) in number_types
+                     else StringNode)
+        if prop not in boolean_properties:
+            expressions.append(
+                BinaryExpressionNode(
+                    BinaryOperatorNode("=="),
+                    VariableNode(prop),
+                    value_cls(unicode(value))
+                ))
+        else:
+            if value:
+                expressions.append(VariableNode(prop))
+            else:
+                expressions.append(
+                    UnaryExpressionNode(
+                        UnaryOperatorNode("not"),
+                        VariableNode(prop)
+                    ))
+    if len(expressions) > 1:
+        prev = expressions[-1]
+        for curr in reversed(expressions[:-1]):
+            node = BinaryExpressionNode(
+                BinaryOperatorNode("and"),
+                curr,
+                prev)
+            prev = node
+    else:
+        node = expressions[0]
+
+    root.append(node)
+    root.append(StringNode(status))
+
+    return root
+
+
+def get_manifest(metadata_root, test_path, url_base, property_order=None,
+                 boolean_properties=None):
+    """Get the ExpectedManifest for a particular test path, or None if there is no
+    metadata stored for that test path.
+
+    :param metadata_root: Absolute path to the root of the metadata directory
+    :param test_path: Path to the test(s) relative to the test root
+    :param url_base: Base url for serving the tests in this manifest
+    :param property_order: List of properties to use in expectation metadata
+                           from most to least significant.
+    :param boolean_properties: Set of properties in property_order that should
+                               be treated as boolean."""
+    manifest_path = expected.expected_path(metadata_root, test_path)
+    try:
+        with open(manifest_path) as f:
+            return compile(f, test_path, url_base, property_order=property_order,
+                           boolean_properties=boolean_properties)
+    except IOError:
+        return None
+
+
+def compile(manifest_file, test_path, url_base, property_order=None,
+            boolean_properties=None):
+    return conditional.compile(manifest_file,
+                               data_cls_getter=data_cls_getter,
+                               test_path=test_path,
+                               url_base=url_base,
+                               property_order=property_order,
+                               boolean_properties=boolean_properties)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/metadata.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/metadata.py
new file mode 100644
index 0000000..73f266f
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/metadata.py
@@ -0,0 +1,351 @@
+import os
+import shutil
+import sys
+import tempfile
+import types
+import uuid
+from collections import defaultdict
+
+from mozlog import reader
+from mozlog import structuredlog
+
+import expected
+import manifestupdate
+import testloader
+import wptmanifest
+import wpttest
+from vcs import git
+manifest = None  # Module that will be imported relative to test_root
+manifestitem = None
+
+logger = structuredlog.StructuredLogger("web-platform-tests")
+
+
+def load_test_manifests(serve_root, test_paths):
+    do_delayed_imports(serve_root)
+    manifest_loader = testloader.ManifestLoader(test_paths, False)
+    return manifest_loader.load()
+
+
+def update_expected(test_paths, serve_root, log_file_names,
+                    rev_old=None, rev_new="HEAD", ignore_existing=False,
+                    sync_root=None, property_order=None, boolean_properties=None):
+    """Update the metadata files for web-platform-tests based on
+    the results obtained in a previous run"""
+
+    manifests = load_test_manifests(serve_root, test_paths)
+
+    change_data = {}
+
+    if sync_root is not None:
+        if rev_old is not None:
+            rev_old = git("rev-parse", rev_old, repo=sync_root).strip()
+        rev_new = git("rev-parse", rev_new, repo=sync_root).strip()
+
+        if rev_old is not None:
+            change_data = load_change_data(rev_old, rev_new, repo=sync_root)
+
+    expected_map_by_manifest = update_from_logs(manifests,
+                                                *log_file_names,
+                                                ignore_existing=ignore_existing,
+                                                property_order=property_order,
+                                                boolean_properties=boolean_properties)
+
+    results_changed = []
+    for test_manifest, expected_map in expected_map_by_manifest.iteritems():
+        url_base = manifests[test_manifest]["url_base"]
+        metadata_path = test_paths[url_base]["metadata_path"]
+        write_changes(metadata_path, expected_map)
+        # Collect changed paths from every manifest, not just the last one
+        results_changed.extend(item.test_path for item in expected_map.itervalues()
+                               if item.modified)
+
+    return unexpected_changes(manifests, change_data, results_changed)
+
+
+def do_delayed_imports(serve_root):
+    global manifest, manifestitem
+    from manifest import manifest, item as manifestitem
+
+
+def files_in_repo(repo_root):
+    return git("ls-tree", "-r", "--name-only", "HEAD").split("\n")
+
+
+def rev_range(rev_old, rev_new, symmetric=False):
+    joiner = ".." if not symmetric else "..."
+    return "".join([rev_old, joiner, rev_new])
+
+
+def paths_changed(rev_old, rev_new, repo):
+    data = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
+    lines = [tuple(item.strip() for item in line.strip().split("\t", 1))
+             for line in data.split("\n") if line.strip()]
+    output = set(lines)
+    return output
+
+
+def load_change_data(rev_old, rev_new, repo):
+    changes = paths_changed(rev_old, rev_new, repo)
+    rv = {}
+    status_keys = {"M": "modified",
+                   "A": "new",
+                   "D": "deleted"}
+    # TODO: deal with renames
+    for item in changes:
+        rv[item[1]] = status_keys[item[0]]
+    return rv
+
+
+def unexpected_changes(manifests, change_data, files_changed):
+    files_changed = set(files_changed)
+
+    root_manifest = None
+    for manifest, paths in manifests.iteritems():
+        if paths["url_base"] == "/":
+            root_manifest = manifest
+            break
+    else:
+        return []
+
+    # Results are unexpected when the test file itself was not modified
+    return [fn for _, fn, _ in root_manifest
+            if fn in files_changed and change_data.get(fn) != "modified"]
+
+# For each testrun
+# Load all files and scan for the suite_start entry
+# Build a hash of filename: properties
+# For each different set of properties, gather all chunks
+# For each chunk in the set of chunks, go through all tests
+# for each test, make a map of {conditionals: [(platform, new_value)]}
+# Repeat for each platform
+# For each test in the list of tests:
+#   for each conditional:
+#      If all the new values match (or there aren't any) retain that conditional
+#      If any new values mismatch mark the test as needing human attention
+#   Check if all the RHS values are the same; if so collapse the conditionals
+
+
+def update_from_logs(manifests, *log_filenames, **kwargs):
+    ignore_existing = kwargs.get("ignore_existing", False)
+    property_order = kwargs.get("property_order")
+    boolean_properties = kwargs.get("boolean_properties")
+
+    expected_map = {}
+    id_test_map = {}
+
+    for test_manifest, paths in manifests.iteritems():
+        expected_map_manifest, id_path_map_manifest = create_test_tree(
+            paths["metadata_path"],
+            test_manifest,
+            property_order=property_order,
+            boolean_properties=boolean_properties)
+        expected_map[test_manifest] = expected_map_manifest
+        id_test_map.update(id_path_map_manifest)
+
+    updater = ExpectedUpdater(manifests, expected_map, id_test_map,
+                              ignore_existing=ignore_existing)
+    for log_filename in log_filenames:
+        with open(log_filename) as f:
+            updater.update_from_log(f)
+
+    for manifest_expected in expected_map.itervalues():
+        for tree in manifest_expected.itervalues():
+            for test in tree.iterchildren():
+                for subtest in test.iterchildren():
+                    subtest.coalesce_expected()
+                test.coalesce_expected()
+
+    return expected_map
+
+
+def directory_manifests(metadata_path):
+    rv = []
+    for dirpath, dirnames, filenames in os.walk(metadata_path):
+        if "__dir__.ini" in filenames:
+            rel_path = os.path.relpath(dirpath, metadata_path)
+            rv.append(os.path.join(rel_path, "__dir__.ini"))
+    return rv
+
+
+def write_changes(metadata_path, expected_map):
+    # First write the new manifest files to a temporary directory
+    temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
+    write_new_expected(temp_path, expected_map)
+
+    # Keep all __dir__.ini files (these are not in expected_map because they
+    # aren't associated with a specific test)
+    keep_files = directory_manifests(metadata_path)
+
+    # Copy all files in the root to the temporary location since
+    # these cannot be ini files
+    keep_files.extend(item for item in os.listdir(metadata_path) if
+                      not os.path.isdir(os.path.join(metadata_path, item)))
+
+    for item in keep_files:
+        dest_dir = os.path.dirname(os.path.join(temp_path, item))
+        if not os.path.exists(dest_dir):
+            os.makedirs(dest_dir)
+        shutil.copyfile(os.path.join(metadata_path, item),
+                        os.path.join(temp_path, item))
+
+    # Then move the old manifest files to a new location
+    temp_path_2 = metadata_path + str(uuid.uuid4())
+    os.rename(metadata_path, temp_path_2)
+    # Move the new files to the destination location and remove the old files
+    os.rename(temp_path, metadata_path)
+    shutil.rmtree(temp_path_2)
+
+
+def write_new_expected(metadata_path, expected_map):
+    # Serialize the data back to a file
+    for tree in expected_map.itervalues():
+        if not tree.is_empty:
+            manifest_str = wptmanifest.serialize(tree.node, skip_empty_data=True)
+            assert manifest_str != ""
+            path = expected.expected_path(metadata_path, tree.test_path)
+            dir_name = os.path.split(path)[0]
+            if not os.path.exists(dir_name):
+                os.makedirs(dir_name)
+            with open(path, "wb") as f:
+                f.write(manifest_str)
+
+
+class ExpectedUpdater(object):
+    def __init__(self, test_manifests, expected_tree, id_path_map, ignore_existing=False):
+        self.test_manifests = test_manifests
+        self.expected_tree = expected_tree
+        self.id_path_map = id_path_map
+        self.ignore_existing = ignore_existing
+        self.run_info = None
+        self.action_map = {"suite_start": self.suite_start,
+                           "test_start": self.test_start,
+                           "test_status": self.test_status,
+                           "test_end": self.test_end}
+        self.tests_visited = {}
+
+        self.test_cache = {}
+
+    def update_from_log(self, log_file):
+        self.run_info = None
+        log_reader = reader.read(log_file)
+        reader.each_log(log_reader, self.action_map)
+
+    def suite_start(self, data):
+        self.run_info = data["run_info"]
+
+    def test_id(self, id):
+        if isinstance(id, types.StringTypes):
+            return id
+        else:
+            return tuple(id)
+
+    def test_start(self, data):
+        test_id = self.test_id(data["test"])
+        try:
+            test_manifest, test = self.id_path_map[test_id]
+            expected_node = self.expected_tree[test_manifest][test].get_test(test_id)
+        except KeyError:
+            print "Test not found %s, skipping" % test_id
+            return
+        self.test_cache[test_id] = expected_node
+
+        if test_id not in self.tests_visited:
+            if self.ignore_existing:
+                expected_node.clear_expected()
+            self.tests_visited[test_id] = set()
+
+    def test_status(self, data):
+        test_id = self.test_id(data["test"])
+        test = self.test_cache.get(test_id)
+        if test is None:
+            return
+        test_cls = wpttest.manifest_test_cls[test.test_type]
+
+        subtest = test.get_subtest(data["subtest"])
+
+        self.tests_visited[test.id].add(data["subtest"])
+
+        result = test_cls.subtest_result_cls(
+            data["subtest"],
+            data["status"],
+            data.get("message"))
+
+        subtest.set_result(self.run_info, result)
+
+    def test_end(self, data):
+        test_id = self.test_id(data["test"])
+        test = self.test_cache.get(test_id)
+        if test is None:
+            return
+        test_cls = wpttest.manifest_test_cls[test.test_type]
+
+        if data["status"] == "SKIP":
+            return
+
+        result = test_cls.result_cls(
+            data["status"],
+            data.get("message"))
+
+        test.set_result(self.run_info, result)
+        del self.test_cache[test_id]
+
+
+def create_test_tree(metadata_path, test_manifest, property_order=None,
+                     boolean_properties=None):
+    expected_map = {}
+    id_test_map = {}
+    exclude_types = frozenset(["stub", "helper", "manual", "support", "conformancechecker"])
+    all_types = [item.item_type for item in manifestitem.__dict__.itervalues()
+                 if isinstance(item, type) and
+                 issubclass(item, manifestitem.ManifestItem) and
+                 item.item_type is not None]
+    include_types = set(all_types) - exclude_types
+    for _, test_path, tests in test_manifest.itertypes(*include_types):
+        expected_data = load_expected(test_manifest, metadata_path, test_path, tests,
+                                      property_order=property_order,
+                                      boolean_properties=boolean_properties)
+        if expected_data is None:
+            expected_data = create_expected(test_manifest,
+                                            test_path,
+                                            tests,
+                                            property_order=property_order,
+                                            boolean_properties=boolean_properties)
+
+        for test in tests:
+            id_test_map[test.id] = (test_manifest, test)
+            expected_map[test] = expected_data
+
+    return expected_map, id_test_map
+
+
+def create_expected(test_manifest, test_path, tests, property_order=None,
+                    boolean_properties=None):
+    expected = manifestupdate.ExpectedManifest(None, test_path, test_manifest.url_base,
+                                               property_order=property_order,
+                                               boolean_properties=boolean_properties)
+    for test in tests:
+        expected.append(manifestupdate.TestNode.create(test.item_type, test.id))
+    return expected
+
+
+def load_expected(test_manifest, metadata_path, test_path, tests, property_order=None,
+                  boolean_properties=None):
+    expected_manifest = manifestupdate.get_manifest(metadata_path,
+                                                    test_path,
+                                                    test_manifest.url_base,
+                                                    property_order=property_order,
+                                                    boolean_properties=boolean_properties)
+    if expected_manifest is None:
+        return
+
+    tests_by_id = {item.id: item for item in tests}
+
+    # Remove expected data for tests that no longer exist
+    for test in expected_manifest.iterchildren():
+        if test.id not in tests_by_id:
+            test.remove()
+
+    # Add tests that don't have expected data
+    for test in tests:
+        if not expected_manifest.has_test(test.id):
+            expected_manifest.append(manifestupdate.TestNode.create(test.item_type, test.id))
+
+    return expected_manifest
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/products.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/products.py
new file mode 100644
index 0000000..c077f95
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/products.py
@@ -0,0 +1,69 @@
+import os
+import importlib
+import imp
+
+from .browsers import product_list
+
+def products_enabled(config):
+    names = config.get("products", {}).keys()
+    if not names:
+        return product_list
+    else:
+        return names
+
+def product_module(config, product):
+    here = os.path.split(__file__)[0]
+    product_dir = os.path.join(here, "browsers")
+
+    if product not in products_enabled(config):
+        raise ValueError("Unknown product %s" % product)
+
+    path = config.get("products", {}).get(product, None)
+    if path:
+        module = imp.load_source('wptrunner.browsers.' + product, path)
+    else:
+        module = importlib.import_module("wptrunner.browsers." + product)
+
+    if not hasattr(module, "__wptrunner__"):
+        raise ValueError("Product module does not define __wptrunner__ variable")
+
+    return module
+
+
+def load_product(config, product):
+    module = product_module(config, product)
+    data = module.__wptrunner__
+
+    check_args = getattr(module, data["check_args"])
+    browser_cls = getattr(module, data["browser"])
+    browser_kwargs = getattr(module, data["browser_kwargs"])
+    executor_kwargs = getattr(module, data["executor_kwargs"])
+    env_options = getattr(module, data["env_options"])()
+    env_extras = getattr(module, data["env_extras"])
+    run_info_extras = (getattr(module, data["run_info_extras"])
+                       if "run_info_extras" in data else lambda **kwargs:{})
+
+    executor_classes = {}
+    for test_type, cls_name in data["executor"].iteritems():
+        cls = getattr(module, cls_name)
+        executor_classes[test_type] = cls
+
+    return (check_args,
+            browser_cls, browser_kwargs,
+            executor_classes, executor_kwargs,
+            env_options, env_extras, run_info_extras)
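+
+# Sketch of the metadata a product module exports (the names on the right
+# are illustrative; each is looked up on the module via getattr above):
+#
+#   __wptrunner__ = {"product": "myproduct",
+#                    "check_args": "check_args",
+#                    "browser": "MyBrowser",
+#                    "browser_kwargs": "browser_kwargs",
+#                    "executor": {"testharness": "MyTestharnessExecutor"},
+#                    "executor_kwargs": "executor_kwargs",
+#                    "env_options": "env_options",
+#                    "env_extras": "env_extras"}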
+
+
+def load_product_update(config, product):
+    """Return tuple of (property_order, boolean_properties) indicating the
+    run_info properties to use when constructing the expectation data for
+    this product. None for either key indicates that the default keys
+    appropriate for distinguishing based on platform will be used."""
+
+    module = product_module(config, product)
+    data = module.__wptrunner__
+
+    update_properties = (getattr(module, data["update_properties"])()
+                         if "update_properties" in data else (None, None))
+
+    return update_properties
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/reduce.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/reduce.py
new file mode 100644
index 0000000..d245ee3
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/reduce.py
@@ -0,0 +1,193 @@
+import sys
+import tempfile
+from cStringIO import StringIO
+from collections import defaultdict
+
+import wptrunner
+import wpttest
+
+from mozlog import commandline, reader
+
+logger = None
+
+
+def setup_logging(args, defaults):
+    global logger
+    logger = commandline.setup_logging("web-platform-tests-unstable", args, defaults)
+    wptrunner.setup_stdlib_logger()
+
+    for name in args.keys():
+        if name.startswith("log_"):
+            args.pop(name)
+
+    return logger
+
+
+def group(items, size):
+    rv = []
+    i = 0
+    while i < len(items):
+        rv.append(items[i:i + size])
+        i += size
+
+    return rv
+
+
+def next_power_of_two(num):
+    rv = 1
+    while rv < num:
+        rv = rv << 1
+    return rv
+
+
+class Reducer(object):
+    def __init__(self, target, **kwargs):
+        self.target = target
+
+        self.test_type = kwargs["test_types"][0]
+        run_info = wpttest.get_run_info(kwargs["metadata_root"],
+                                        kwargs["product"],
+                                        debug=False)
+        test_filter = wptrunner.TestFilter(include=kwargs["include"])
+        self.test_loader = wptrunner.TestLoader(kwargs["tests_root"],
+                                                kwargs["metadata_root"],
+                                                [self.test_type],
+                                                run_info,
+                                                manifest_filer=test_filter)
+        if kwargs["repeat"] == 1:
+            logger.critical("Need to specify --repeat with more than one repetition")
+            sys.exit(1)
+        self.kwargs = kwargs
+
+    def run(self):
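+        # Delta-debugging-style reduction: keep the target test last, then
+        # repeatedly drop power-of-two sized chunks of the preceding tests
+        # whose removal preserves the instability, halving the chunk size
+        # each round until it reaches a single test.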
+        all_tests = self.get_initial_tests()
+
+        tests = all_tests[:-1]
+        target_test = [all_tests[-1]]
+
+        if self.unstable(target_test):
+            return target_test
+
+        if not self.unstable(all_tests):
+            return []
+
+        chunk_size = next_power_of_two(int(len(tests) / 2))
+        logger.debug("Using chunk size %i" % chunk_size)
+
+        while chunk_size >= 1:
+            logger.debug("%i tests remain" % len(tests))
+            chunks = group(tests, chunk_size)
+            chunk_results = [None] * len(chunks)
+
+            for i, chunk in enumerate(chunks):
+                logger.debug("Running chunk %i/%i of size %i" % (i + 1, len(chunks), chunk_size))
+                trial_tests = []
+                chunk_str = ""
+                for j, inc_chunk in enumerate(chunks):
+                    if i != j and chunk_results[j] in (None, False):
+                        chunk_str += "+"
+                        trial_tests.extend(inc_chunk)
+                    else:
+                        chunk_str += "-"
+                logger.debug("Using chunks %s" % chunk_str)
+                trial_tests.extend(target_test)
+
+                chunk_results[i] = self.unstable(trial_tests)
+
+                # Dangerous? optimisation: if we got stability for the first
+                # N-1 chunks, it must be unstable with the Nth chunk:
+                # if i == len(chunks) - 2 and all(item is False for item in chunk_results[:-1]):
+                #     chunk_results[i+1] = True
+                #     continue
+
+            new_tests = []
+            keep_str = ""
+            for result, chunk in zip(chunk_results, chunks):
+                if not result:
+                    keep_str += "+"
+                    new_tests.extend(chunk)
+                else:
+                    keep_str += "-"
+
+            logger.debug("Keeping chunks %s" % keep_str)
+
+            tests = new_tests
+
+            chunk_size = int(chunk_size / 2)
+
+        return tests + target_test
+
+    def unstable(self, tests):
+        logger.debug("Running with %i tests" % len(tests))
+
+        self.test_loader.tests = {self.test_type: tests}
+
+        stdout, stderr = sys.stdout, sys.stderr
+        sys.stdout = StringIO()
+        sys.stderr = StringIO()
+
+        with tempfile.NamedTemporaryFile() as f:
+            args = self.kwargs.copy()
+            args["log_raw"] = [f]
+            args["capture_stdio"] = False
+            wptrunner.setup_logging(args, {})
+            wptrunner.run_tests(test_loader=self.test_loader, **args)
+            wptrunner.logger.remove_handler(wptrunner.logger.handlers[0])
+            is_unstable = self.log_is_unstable(f)
+
+            sys.stdout, sys.stderr = stdout, stderr
+
+        logger.debug("Result was unstable with chunk removed"
+                     if is_unstable else "stable")
+
+        return is_unstable
+
+    def log_is_unstable(self, log_f):
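+        # The target is judged unstable when any subtest (or the test itself,
+        # keyed under None) reports more than one distinct status across the
+        # repeated runs recorded in the raw log.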
+        log_f.seek(0)
+
+        statuses = defaultdict(set)
+
+        def handle_status(item):
+            if item["test"] == self.target:
+                statuses[item["subtest"]].add(item["status"])
+
+        def handle_end(item):
+            if item["test"] == self.target:
+                statuses[None].add(item["status"])
+
+        reader.each_log(reader.read(log_f),
+                        {"test_status": handle_status,
+                         "test_end": handle_end})
+
+        logger.debug(str(statuses))
+
+        if not statuses:
+            logger.error("Didn't get any useful output from wptrunner")
+            log_f.seek(0)
+            for item in reader.read(log_f):
+                logger.debug(item)
+            return None
+
+        return any(len(item) > 1 for item in statuses.itervalues())
+
+    def get_initial_tests(self):
+        # Need to pass in arguments
+
+        all_tests = self.test_loader.tests[self.test_type]
+        tests = []
+        for item in all_tests:
+            tests.append(item)
+            if item.url == self.target:
+                break
+
+        logger.debug("Starting with tests: %s" % ("\n".join(item.id for item in tests)))
+
+        return tests
+
+
+def do_reduce(**kwargs):
+    target = kwargs.pop("target")
+    reducer = Reducer(target, **kwargs)
+
+    unstable_set = reducer.run()
+    return unstable_set
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharness_runner.html b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharness_runner.html
new file mode 100644
index 0000000..1cc80a2
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharness_runner.html
@@ -0,0 +1,6 @@
+<!doctype html>
+<title></title>
+<script>
+var timeout_multiplier = 1;
+var win = null;
+</script>
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport-servo.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport-servo.js
new file mode 100644
index 0000000..b672aea
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport-servo.js
@@ -0,0 +1,17 @@
+var props = {output:%(output)d};
+var start_loc = document.createElement('a');
+start_loc.href = location.href;
+setup(props);
+
+add_completion_callback(function (tests, harness_status) {
+    var id = start_loc.pathname + start_loc.search + start_loc.hash;
+    console.log("ALERT: RESULT: " + JSON.stringify([
+        id,
+        harness_status.status,
+        harness_status.message,
+        harness_status.stack,
+        tests.map(function(t) {
+            return [t.name, t.status, t.message, t.stack]
+        }),
+    ]));
+});
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport-servodriver.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport-servodriver.js
new file mode 100644
index 0000000..c888413
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport-servodriver.js
@@ -0,0 +1,23 @@
+setup({output:%(output)d});
+
+add_completion_callback(function() {
+    add_completion_callback(function (tests, status) {
+        var subtest_results = tests.map(function(x) {
+            return [x.name, x.status, x.message, x.stack]
+        });
+        var id = location.pathname + location.search + location.hash;
+        var results = JSON.stringify([id,
+                                      status.status,
+                                      status.message,
+                                      status.stack,
+                                      subtest_results]);
+        (function done() {
+            if (window.__wd_results_callback__) {
+                clearTimeout(__wd_results_timer__);
+                __wd_results_callback__(results)
+            } else {
+                setTimeout(done, 20);
+            }
+        })()
+    })
+});
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport.js b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport.js
new file mode 100644
index 0000000..62ddaff
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testharnessreport.js
@@ -0,0 +1,13 @@
+var props = {output:%(output)d,
+             explicit_timeout: true,
+             message_events: ["completion"]};
+
+if (window.opener && "timeout_multiplier" in window.opener) {
+    props["timeout_multiplier"] = window.opener.timeout_multiplier;
+}
+
+if (window.opener && window.opener.explicit_timeout) {
+    props["explicit_timeout"] = window.opener.explicit_timeout;
+}
+
+setup(props);
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testloader.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testloader.py
new file mode 100644
index 0000000..91392fe
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testloader.py
@@ -0,0 +1,640 @@
+import hashlib
+import json
+import os
+import urlparse
+from abc import ABCMeta, abstractmethod
+from Queue import Empty
+from collections import defaultdict, OrderedDict, deque
+from multiprocessing import Queue
+
+import manifestinclude
+import manifestexpected
+import wpttest
+from mozlog import structured
+
+manifest = None
+manifest_update = None
+
+
+def do_delayed_imports():
+    # This relies on an already loaded module having set the sys.path correctly :(
+    global manifest, manifest_update
+    from manifest import manifest
+    from manifest import update as manifest_update
+
+
+class TestChunker(object):
+    def __init__(self, total_chunks, chunk_number):
+        self.total_chunks = total_chunks
+        self.chunk_number = chunk_number
+        assert self.chunk_number <= self.total_chunks
+        self.logger = structured.get_default_logger()
+        assert self.logger
+
+    def __call__(self, manifest):
+        raise NotImplementedError
+
+
+class Unchunked(TestChunker):
+    def __init__(self, *args, **kwargs):
+        TestChunker.__init__(self, *args, **kwargs)
+        assert self.total_chunks == 1
+
+    def __call__(self, manifest):
+        for item in manifest:
+            yield item
+
+
+class HashChunker(TestChunker):
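+    """Assign each test path to the chunk whose index matches the md5 hash
+    of the path modulo the total number of chunks, giving a stable but not
+    necessarily size-balanced partition."""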
+    def __call__(self, manifest):
+        chunk_index = self.chunk_number - 1
+        for test_type, test_path, tests in manifest:
+            h = int(hashlib.md5(test_path).hexdigest(), 16)
+            if h % self.total_chunks == chunk_index:
+                yield test_type, test_path, tests
+
+
+class DirectoryHashChunker(TestChunker):
+    """Like HashChunker except the directory is hashed.
+
+    This ensures that all tests in the same directory end up in the same
+    chunk.
+    """
+    def __call__(self, manifest):
+        chunk_index = self.chunk_number - 1
+        for test_type, test_path, tests in manifest:
+            h = int(hashlib.md5(os.path.dirname(test_path)).hexdigest(), 16)
+            if h % self.total_chunks == chunk_index:
+                yield test_type, test_path, tests
+
+
+class EqualTimeChunker(TestChunker):
+    def _group_by_directory(self, manifest_items):
+        """Split the list of manifest items into a ordered dict that groups tests in
+        so that anything in the same subdirectory beyond a depth of 3 is in the same
+        group. So all tests in a/b/c, a/b/c/d and a/b/c/e will be grouped together
+        and separate to tests in a/b/f
+
+        Returns: tuple (ordered dict of {test_dir: PathData}, total estimated runtime)
+        """
+
+        class PathData(object):
+            def __init__(self, path):
+                self.path = path
+                self.time = 0
+                self.tests = []
+
+        by_dir = OrderedDict()
+        total_time = 0
+
+        for i, (test_type, test_path, tests) in enumerate(manifest_items):
+            test_dir = tuple(os.path.split(test_path)[0].split(os.path.sep)[:3])
+
+            if test_dir not in by_dir:
+                by_dir[test_dir] = PathData(test_dir)
+
+            data = by_dir[test_dir]
+            time = sum(test.default_timeout if test.timeout !=
+                       "long" else test.long_timeout for test in tests)
+            data.time += time
+            total_time += time
+            data.tests.append((test_type, test_path, tests))
+
+        return by_dir, total_time
+
+    def _maybe_remove(self, chunks, i, direction):
+        """Trial removing a chunk from one chunk to an adjacent one.
+
+        :param chunks: - the list of all chunks
+        :param i: - the chunk index in the list of chunks to try removing from
+        :param direction: either "next" if we are going to move from the end to
+                          the subsequent chunk, or "prev" if we are going to move
+                          from the start into the previous chunk.
+
+        :returns bool: Did a chunk get moved?"""
+        source_chunk = chunks[i]
+        if direction == "next":
+            target_chunk = chunks[i+1]
+            path_index = -1
+            move_func = lambda: target_chunk.appendleft(source_chunk.pop())
+        elif direction == "prev":
+            target_chunk = chunks[i-1]
+            path_index = 0
+            move_func = lambda: target_chunk.append(source_chunk.popleft())
+        else:
+            raise ValueError("Unexpected move direction %s" % direction)
+
+        return self._maybe_move(source_chunk, target_chunk, path_index, move_func)
+
+    def _maybe_add(self, chunks, i, direction):
+        """Trial adding a chunk from one chunk to an adjacent one.
+
+        :param chunks: - the list of all chunks
+        :param i: - the chunk index in the list of chunks to try adding to
+        :param direction: either "next" if we are going to remove from the
+                          the subsequent chunk, or "prev" if we are going to remove
+                          from the the previous chunk.
+
+        :returns bool: Did a chunk get moved?"""
+        target_chunk = chunks[i]
+        if direction == "next":
+            source_chunk = chunks[i+1]
+            path_index = 0
+            move_func = lambda: target_chunk.append(source_chunk.popleft())
+        elif direction == "prev":
+            source_chunk = chunks[i-1]
+            path_index = -1
+            move_func = lambda: target_chunk.appendleft(source_chunk.pop())
+        else:
+            raise ValueError("Unexpected move direction %s" % direction)
+
+        return self._maybe_move(source_chunk, target_chunk, path_index, move_func)
+
+    def _maybe_move(self, source_chunk, target_chunk, path_index, move_func):
+        """Move from one chunk to another, assess the change in badness,
+        and keep the move iff it decreases the badness score.
+
+        :param source_chunk: chunk to move from
+        :param target_chunk: chunk to move to
+        :param path_index: 0 if we are moving from the start or -1 if we are moving from the
+                           end
+        :param move_func: Function that actually moves between chunks"""
+        if len(source_chunk.paths) <= 1:
+            return False
+
+        move_time = source_chunk.paths[path_index].time
+
+        new_source_badness = self._badness(source_chunk.time - move_time)
+        new_target_badness = self._badness(target_chunk.time + move_time)
+
+        delta_badness = ((new_source_badness + new_target_badness) -
+                         (source_chunk.badness + target_chunk.badness))
+        if delta_badness < 0:
+            move_func()
+            return True
+
+        return False
+
+    def _badness(self, time):
+        """Metric of badness for a specific chunk
+
+        :param time: the time for a specific chunk"""
+        return (time - self.expected_time)**2
+
+    def _get_chunk(self, manifest_items):
+        by_dir, total_time = self._group_by_directory(manifest_items)
+
+        if len(by_dir) < self.total_chunks:
+            raise ValueError("Tried to split into %i chunks, but only %i subdirectories included" % (
+                self.total_chunks, len(by_dir)))
+
+        self.expected_time = float(total_time) / self.total_chunks
+
+        chunks = self._create_initial_chunks(by_dir)
+
+        while True:
+            # Move a test from one chunk to the next until doing so no longer
+            # reduces the badness
+            got_improvement = self._update_chunks(chunks)
+            if not got_improvement:
+                break
+
+        self.logger.debug(self.expected_time)
+        for i, chunk in chunks.iteritems():
+            self.logger.debug("%i: %i, %i" % (i + 1, chunk.time, chunk.badness))
+
+        assert self._all_tests(by_dir) == self._chunked_tests(chunks)
+
+        return self._get_tests(chunks)
+
+    @staticmethod
+    def _all_tests(by_dir):
+        """Return a set of all tests in the manifest from a grouping by directory"""
+        return set(x[0] for item in by_dir.itervalues()
+                   for x in item.tests)
+
+    @staticmethod
+    def _chunked_tests(chunks):
+        """Return a set of all tests in the manifest from the chunk list"""
+        return set(x[0] for chunk in chunks.itervalues()
+                   for path in chunk.paths
+                   for x in path.tests)
+
+    def _create_initial_chunks(self, by_dir):
+        """Create an initial unbalanced list of chunks.
+
+        :param by_dir: All tests in the manifest grouped by subdirectory
+        :returns list: A list of Chunk objects"""
+
+        class Chunk(object):
+            def __init__(self, paths, index):
+                """List of PathData objects that together form a single chunk of
+                tests"""
+                self.paths = deque(paths)
+                self.time = sum(item.time for item in paths)
+                self.index = index
+
+            def appendleft(self, path):
+                """Add a PathData object to the start of the chunk"""
+                self.paths.appendleft(path)
+                self.time += path.time
+
+            def append(self, path):
+                """Add a PathData object to the end of the chunk"""
+                self.paths.append(path)
+                self.time += path.time
+
+            def pop(self):
+                """Remove PathData object from the end of the chunk"""
+                assert len(self.paths) > 1
+                self.time -= self.paths[-1].time
+                return self.paths.pop()
+
+            def popleft(self):
+                """Remove PathData object from the start of the chunk"""
+                assert len(self.paths) > 1
+                self.time -= self.paths[0].time
+                return self.paths.popleft()
+
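+            # Note: "self_" is the Chunk instance; "self", closed over from
+            # the enclosing method, is the EqualTimeChunker whose _badness
+            # metric is reused here.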
+            @property
+            def badness(self_):
+                """Badness metric for this chunk"""
+                return self._badness(self_.time)
+
+        initial_size = len(by_dir) / self.total_chunks
+        chunk_boundaries = [initial_size * i
+                            for i in xrange(self.total_chunks)] + [len(by_dir)]
+
+        chunks = OrderedDict()
+        for i, lower in enumerate(chunk_boundaries[:-1]):
+            upper = chunk_boundaries[i + 1]
+            paths = by_dir.values()[lower:upper]
+            chunks[i] = Chunk(paths, i)
+
+        assert self._all_tests(by_dir) == self._chunked_tests(chunks)
+
+        return chunks
+
+    def _update_chunks(self, chunks):
+        """Run a single iteration of the chunk update algorithm.
+
+        :param chunks: - List of chunks
+        """
+        #TODO: consider replacing this with a heap
+        sorted_chunks = sorted(chunks.values(), key=lambda x:-x.badness)
+        got_improvement = False
+        for chunk in sorted_chunks:
+            if chunk.time < self.expected_time:
+                f = self._maybe_add
+            else:
+                f = self._maybe_remove
+
+            if chunk.index == 0:
+                order = ["next"]
+            elif chunk.index == self.total_chunks - 1:
+                order = ["prev"]
+            else:
+                if chunk.time < self.expected_time:
+                    # First try to add a test from the neighboring chunk with the
+                    # greatest total time
+                    if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:
+                        order = ["next", "prev"]
+                    else:
+                        order = ["prev", "next"]
+                else:
+                    # First try to remove a test and add to the neighboring chunk with the
+                    # lowest total time
+                    if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:
+                        order = ["prev", "next"]
+                    else:
+                        order = ["next", "prev"]
+
+            for direction in order:
+                if f(chunks, chunk.index, direction):
+                    got_improvement = True
+                    break
+
+            if got_improvement:
+                break
+
+        return got_improvement
+
+    def _get_tests(self, chunks):
+        """Return the list of tests corresponding to the chunk number we are running.
+
+        :param chunks: OrderedDict of chunks, keyed by chunk index"""
+        tests = []
+        for path in chunks[self.chunk_number - 1].paths:
+            tests.extend(path.tests)
+
+        return tests
+
+    def __call__(self, manifest_iter):
+        manifest = list(manifest_iter)
+        tests = self._get_chunk(manifest)
+        for item in tests:
+            yield item
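+
+# Usage sketch (cf. tests/test_chunker.py below): select chunk 1 of 3 from an
+# iterable of (test_type, test_path, tests) manifest items:
+#
+#   chunk_1 = list(EqualTimeChunker(3, 1)(manifest_items))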
+
+
+class TestFilter(object):
+    def __init__(self, test_manifests, include=None, exclude=None, manifest_path=None):
+        if manifest_path is not None and include is None:
+            self.manifest = manifestinclude.get_manifest(manifest_path)
+        else:
+            self.manifest = manifestinclude.IncludeManifest.create()
+
+        if include:
+            self.manifest.set("skip", "true")
+            for item in include:
+                self.manifest.add_include(test_manifests, item)
+
+        if exclude:
+            for item in exclude:
+                self.manifest.add_exclude(test_manifests, item)
+
+    def __call__(self, manifest_iter):
+        for test_type, test_path, tests in manifest_iter:
+            include_tests = set()
+            for test in tests:
+                if self.manifest.include(test):
+                    include_tests.add(test)
+
+            if include_tests:
+                yield test_type, test_path, include_tests
+
+class TagFilter(object):
+    def __init__(self, tags):
+        self.tags = set(tags)
+
+    def __call__(self, test_iter):
+        for test in test_iter:
+            if test.tags & self.tags:
+                yield test
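+
+# e.g. TagFilter(["slow"]) passes through only tests whose test.tags set
+# intersects {"slow"}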
+
+class ManifestLoader(object):
+    def __init__(self, test_paths, force_manifest_update=False):
+        do_delayed_imports()
+        self.test_paths = test_paths
+        self.force_manifest_update = force_manifest_update
+        self.logger = structured.get_default_logger()
+        if self.logger is None:
+            self.logger = structured.structuredlog.StructuredLogger("ManifestLoader")
+
+    def load(self):
+        rv = {}
+        for url_base, paths in self.test_paths.iteritems():
+            manifest_file = self.load_manifest(url_base=url_base,
+                                               **paths)
+            path_data = {"url_base": url_base}
+            path_data.update(paths)
+            rv[manifest_file] = path_data
+        return rv
+
+    def create_manifest(self, manifest_path, tests_path, url_base="/"):
+        self.update_manifest(manifest_path, tests_path, url_base, recreate=True)
+
+    def update_manifest(self, manifest_path, tests_path, url_base="/",
+                        recreate=False):
+        self.logger.info("Updating test manifest %s" % manifest_path)
+
+        json_data = None
+        if not recreate:
+            try:
+                with open(manifest_path) as f:
+                    json_data = json.load(f)
+            except IOError:
+                #If the existing file doesn't exist just create one from scratch
+                pass
+
+        if not json_data:
+            manifest_file = manifest.Manifest(url_base)
+        else:
+            try:
+                manifest_file = manifest.Manifest.from_json(tests_path, json_data)
+            except manifest.ManifestVersionMismatch:
+                manifest_file = manifest.Manifest(url_base)
+
+        manifest_update.update(tests_path, manifest_file, True)
+
+        manifest.write(manifest_file, manifest_path)
+
+    def load_manifest(self, tests_path, metadata_path, url_base="/"):
+        manifest_path = os.path.join(metadata_path, "MANIFEST.json")
+        if (not os.path.exists(manifest_path) or
+            self.force_manifest_update):
+            self.update_manifest(manifest_path, tests_path, url_base)
+        manifest_file = manifest.load(tests_path, manifest_path)
+        if manifest_file.url_base != url_base:
+            self.logger.info("Updating url_base in manifest from %s to %s" % (manifest_file.url_base,
+                                                                              url_base))
+            manifest_file.url_base = url_base
+            manifest.write(manifest_file, manifest_path)
+
+        return manifest_file
+
+def iterfilter(filters, iter):
+    for f in filters:
+        iter = f(iter)
+    for item in iter:
+        yield item
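+
+# e.g. iterfilter([f, g], items) yields from g(f(items)): each filter takes
+# and returns an iterator, so manifest and meta filters compose left to right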
+
+class TestLoader(object):
+    def __init__(self,
+                 test_manifests,
+                 test_types,
+                 run_info,
+                 manifest_filters=None,
+                 meta_filters=None,
+                 chunk_type="none",
+                 total_chunks=1,
+                 chunk_number=1,
+                 include_https=True):
+
+        self.test_types = test_types
+        self.run_info = run_info
+
+        self.manifest_filters = manifest_filters if manifest_filters is not None else []
+        self.meta_filters = meta_filters if meta_filters is not None else []
+
+        self.manifests = test_manifests
+        self.tests = None
+        self.disabled_tests = None
+        self.include_https = include_https
+
+        self.chunk_type = chunk_type
+        self.total_chunks = total_chunks
+        self.chunk_number = chunk_number
+
+        self.chunker = {"none": Unchunked,
+                        "hash": HashChunker,
+                        "dir_hash": DirectoryHashChunker,
+                        "equal_time": EqualTimeChunker}[chunk_type](total_chunks,
+                                                                    chunk_number)
+
+        self._test_ids = None
+
+        self.directory_manifests = {}
+
+        self._load_tests()
+
+    @property
+    def test_ids(self):
+        if self._test_ids is None:
+            self._test_ids = []
+            for test_dict in [self.disabled_tests, self.tests]:
+                for test_type in self.test_types:
+                    self._test_ids += [item.id for item in test_dict[test_type]]
+        return self._test_ids
+
+    def get_test(self, manifest_test, inherit_metadata, test_metadata):
+        if test_metadata is not None:
+            inherit_metadata.append(test_metadata)
+            test_metadata = test_metadata.get_test(manifest_test.id)
+
+        return wpttest.from_manifest(manifest_test, inherit_metadata, test_metadata)
+
+    def load_dir_metadata(self, test_manifest, metadata_path, test_path):
+        rv = []
+        path_parts = os.path.dirname(test_path).split(os.path.sep)
+        for i in xrange(1,len(path_parts) + 1):
+            path = os.path.join(metadata_path, os.path.sep.join(path_parts[:i]), "__dir__.ini")
+            if path not in self.directory_manifests:
+                self.directory_manifests[path] = manifestexpected.get_dir_manifest(path,
+                                                                                   self.run_info)
+            manifest = self.directory_manifests[path]
+            if manifest is not None:
+                rv.append(manifest)
+        return rv
+
+    def load_metadata(self, test_manifest, metadata_path, test_path):
+        inherit_metadata = self.load_dir_metadata(test_manifest, metadata_path, test_path)
+        test_metadata = manifestexpected.get_manifest(
+            metadata_path, test_path, test_manifest.url_base, self.run_info)
+        return inherit_metadata, test_metadata
+
+    def iter_tests(self):
+        manifest_items = []
+
+        for manifest in sorted(self.manifests.keys(), key=lambda x:x.url_base):
+            manifest_iter = iterfilter(self.manifest_filters,
+                                       manifest.itertypes(*self.test_types))
+            manifest_items.extend(manifest_iter)
+
+        if self.chunker is not None:
+            manifest_items = self.chunker(manifest_items)
+
+        for test_type, test_path, tests in manifest_items:
+            manifest_file = iter(tests).next().manifest
+            metadata_path = self.manifests[manifest_file]["metadata_path"]
+            inherit_metadata, test_metadata = self.load_metadata(manifest_file, metadata_path, test_path)
+
+            for test in iterfilter(self.meta_filters,
+                                   self.iter_wpttest(inherit_metadata, test_metadata, tests)):
+                yield test_path, test_type, test
+
+    def iter_wpttest(self, inherit_metadata, test_metadata, tests):
+        for manifest_test in tests:
+            yield self.get_test(manifest_test, inherit_metadata, test_metadata)
+
+    def _load_tests(self):
+        """Read in the tests from the manifest file and add them to a queue"""
+        tests = {"enabled":defaultdict(list),
+                 "disabled":defaultdict(list)}
+
+        for test_path, test_type, test in self.iter_tests():
+            enabled = not test.disabled()
+            if not self.include_https and test.environment["protocol"] == "https":
+                enabled = False
+            key = "enabled" if enabled else "disabled"
+            tests[key][test_type].append(test)
+
+        self.tests = tests["enabled"]
+        self.disabled_tests = tests["disabled"]
+
+    def groups(self, test_types, chunk_type="none", total_chunks=1, chunk_number=1):
+        groups = set()
+
+        for test_type in test_types:
+            for test in self.tests[test_type]:
+                group = test.url.split("/")[1]
+                groups.add(group)
+
+        return groups
+
+
+class TestSource(object):
+    __metaclass__ = ABCMeta
+
+    def __init__(self, test_queue):
+        self.test_queue = test_queue
+        self.current_group = None
+        self.current_metadata = None
+
+    @abstractmethod
+    #@classmethod (doesn't compose with @abstractmethod)
+    def make_queue(cls, tests, **kwargs):
+        pass
+
+    def group(self):
+        if not self.current_group or len(self.current_group) == 0:
+            try:
+                self.current_group, self.current_metadata = self.test_queue.get(block=False)
+            except Empty:
+                return None, None
+        return self.current_group, self.current_metadata
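+
+    # group() returns (None, None) once the queue is exhausted; callers such
+    # as TestRunnerManager.get_next_test (testrunner.py) treat that as the
+    # "no more tests" signal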
+
+
+class GroupedSource(TestSource):
+    @classmethod
+    def new_group(cls, state, test, **kwargs):
+        raise NotImplementedError
+
+    @classmethod
+    def make_queue(cls, tests, **kwargs):
+        test_queue = Queue()
+        groups = []
+
+        state = {}
+
+        for test in tests:
+            if cls.new_group(state, test, **kwargs):
+                groups.append((deque(), {}))
+
+            group, metadata = groups[-1]
+            group.append(test)
+            test.update_metadata(metadata)
+
+        for item in groups:
+            test_queue.put(item)
+        return test_queue
+
+
+class SingleTestSource(TestSource):
+    @classmethod
+    def make_queue(cls, tests, **kwargs):
+        test_queue = Queue()
+        processes = kwargs["processes"]
+        queues = [deque([]) for _ in xrange(processes)]
+        metadatas = [{} for _ in xrange(processes)]
+        for test in tests:
+            idx = hash(test.id) % processes
+            group = queues[idx]
+            metadata = metadatas[idx]
+            group.append(test)
+            test.update_metadata(metadata)
+
+        for item in zip(queues, metadatas):
+            test_queue.put(item)
+
+        return test_queue
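+
+# SingleTestSource buckets tests by hash(test.id) % processes, giving each
+# runner process a stable, though only roughly even, share of the tests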
+
+
+class PathGroupedSource(GroupedSource):
+    @classmethod
+    def new_group(cls, state, test, **kwargs):
+        depth = kwargs.get("depth")
+        if depth is True:
+            depth = None
+        path = urlparse.urlsplit(test.url).path.split("/")[1:-1][:depth]
+        rv = path != state.get("prev_path")
+        state["prev_path"] = path
+        return rv
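+
+# Illustrative example: with depth=1 a test at /dom/nodes/test.html yields the
+# key ["dom"], so consecutive tests that share a top-level directory form one
+# group; depth=True (no limit) groups on the full directory path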
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testrunner.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testrunner.py
new file mode 100644
index 0000000..c4655b7
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/testrunner.py
@@ -0,0 +1,761 @@
+from __future__ import unicode_literals
+
+import multiprocessing
+import sys
+import threading
+import traceback
+from Queue import Empty
+from collections import namedtuple
+from multiprocessing import Process, current_process, Queue
+
+from mozlog import structuredlog
+
+# Special value used as a sentinel in various commands
+Stop = object()
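+# Command handlers return Stop (e.g. TestRunner.stop) and the main loop in
+# TestRunner.run checks for it with "rv is Stop" to exit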
+
+
+class MessageLogger(object):
+    def __init__(self, message_func):
+        self.send_message = message_func
+
+    def _log_data(self, action, **kwargs):
+        self.send_message("log", action, kwargs)
+
+    def process_output(self, process, data, command):
+        self._log_data("process_output", process=process, data=data, command=command)
+
+
+def _log_func(level_name):
+    def log(self, message):
+        self._log_data(level_name.lower(), message=message)
+    log.__doc__ = """Log a message with level %s
+
+:param message: The string message to log
+""" % level_name
+    log.__name__ = str(level_name).lower()
+    return log
+
+# Create all the methods on StructuredLog for debug levels
+for level_name in structuredlog.log_levels:
+    setattr(MessageLogger, level_name.lower(), _log_func(level_name))
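+# e.g. after this loop logger.info("hi") on a MessageLogger calls
+# message_func("log", "info", {"message": "hi"})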
+
+
+class TestRunner(object):
+    def __init__(self, command_queue, result_queue, executor):
+        """Class implementing the main loop for running tests.
+
+        This class delegates the job of actually running a test to the executor
+        that is passed in.
+
+        :param command_queue: multiprocessing.Queue used to send commands to
+                              the process
+        :param result_queue: multiprocessing.Queue used to send results to
+                             the parent TestManager process
+        :param executor: TestExecutor object that will actually run a test.
+        """
+        self.command_queue = command_queue
+        self.result_queue = result_queue
+
+        self.executor = executor
+        self.name = current_process().name
+        self.logger = MessageLogger(self.send_message)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.teardown()
+
+    def setup(self):
+        self.logger.debug("Executor setup")
+        self.executor.setup(self)
+        self.logger.debug("Executor setup done")
+
+    def teardown(self):
+        self.executor.teardown()
+        self.send_message("runner_teardown")
+        self.result_queue = None
+        self.command_queue = None
+        self.browser = None
+
+    def run(self):
+        """Main loop accepting commands over the pipe and triggering
+        the associated methods"""
+        self.setup()
+        commands = {"run_test": self.run_test,
+                    "stop": self.stop,
+                    "wait": self.wait}
+        while True:
+            command, args = self.command_queue.get()
+            try:
+                rv = commands[command](*args)
+            except Exception:
+                self.send_message("error",
+                                  "Error running command %s with arguments %r:\n%s" %
+                                  (command, args, traceback.format_exc()))
+            else:
+                if rv is Stop:
+                    break
+
+    def stop(self):
+        return Stop
+
+    def run_test(self, test):
+        try:
+            return self.executor.run_test(test)
+        except Exception:
+            self.logger.critical(traceback.format_exc())
+            raise
+
+    def wait(self):
+        self.executor.protocol.wait()
+        self.send_message("wait_finished")
+
+    def send_message(self, command, *args):
+        self.result_queue.put((command, args))
+
+
+def start_runner(runner_command_queue, runner_result_queue,
+                 executor_cls, executor_kwargs,
+                 executor_browser_cls, executor_browser_kwargs,
+                 stop_flag):
+    """Launch a TestRunner in a new process"""
+    try:
+        browser = executor_browser_cls(**executor_browser_kwargs)
+        executor = executor_cls(browser, **executor_kwargs)
+        with TestRunner(runner_command_queue, runner_result_queue, executor) as runner:
+            try:
+                runner.run()
+            except KeyboardInterrupt:
+                stop_flag.set()
+    except Exception:
+        runner_result_queue.put(("log", ("critical", {"message": traceback.format_exc()})))
+        print >> sys.stderr, traceback.format_exc()
+        stop_flag.set()
+    finally:
+        runner_command_queue = None
+        runner_result_queue = None
+
+
+manager_count = 0
+
+
+def next_manager_number():
+    global manager_count
+    local = manager_count = manager_count + 1
+    return local
+
+
+class BrowserManager(object):
+    def __init__(self, logger, browser, command_queue, no_timeout=False):
+        self.logger = logger
+        self.browser = browser
+        self.command_queue = command_queue
+        self.no_timeout = no_timeout
+        self.browser_settings = None
+        self.last_test = None
+
+        self.started = False
+
+        self.init_timer = None
+
+    def update_settings(self, test):
+        browser_settings = self.browser.settings(test)
+        restart_required = ((self.browser_settings is not None and
+                             self.browser_settings != browser_settings) or
+                            (self.last_test != test and test.expected() == "CRASH"))
+        self.browser_settings = browser_settings
+        self.last_test = test
+        return restart_required
+
+    def init(self):
+        """Launch the browser that is being tested,
+        and the TestRunner process that will run the tests."""
+        # Cancel any init timer left over from a previous attempt so that a
+        # stale timeout can't fire while we are (re)starting the browser
+        if self.init_timer is not None:
+            self.init_timer.cancel()
+
+        self.logger.debug("Init called, starting browser and runner")
+
+        if not self.no_timeout:
+            self.init_timer = threading.Timer(self.browser.init_timeout,
+                                              self.init_timeout)
+        try:
+            if self.init_timer is not None:
+                self.init_timer.start()
+            self.logger.debug("Starting browser with settings %r" % self.browser_settings)
+            self.browser.start(**self.browser_settings)
+            self.browser_pid = self.browser.pid()
+        except Exception:
+            self.logger.error("Failure during init:\n%s" % traceback.format_exc())
+            if self.init_timer is not None:
+                self.init_timer.cancel()
+            succeeded = False
+        else:
+            succeeded = True
+            self.started = True
+
+        return succeeded
+
+    def send_message(self, command, *args):
+        self.command_queue.put((command, args))
+
+    def init_timeout(self):
+        # This is called from a separate thread, so we send a message to the
+        # main loop so we get back onto the manager thread
+        self.logger.debug("init_failed called from timer")
+        self.send_message("init_failed")
+
+    def after_init(self):
+        """Callback when we have started the browser, started the remote
+        control connection, and we are ready to start testing."""
+        if self.init_timer is not None:
+            self.init_timer.cancel()
+
+    def stop(self, force=False):
+        self.browser.stop(force=force)
+        self.started = False
+
+    def cleanup(self):
+        if self.init_timer is not None:
+            self.init_timer.cancel()
+        self.browser.cleanup()
+
+    def check_for_crashes(self):
+        self.browser.check_for_crashes()
+
+    def log_crash(self, test_id):
+        self.browser.log_crash(process=self.browser_pid, test=test_id)
+
+    def is_alive(self):
+        return self.browser.is_alive()
+
+
+class _RunnerManagerState(object):
+    before_init = namedtuple("before_init", [])
+    initializing = namedtuple("initializing_browser",
+                              ["test", "test_group", "group_metadata", "failure_count"])
+    running = namedtuple("running", ["test", "test_group", "group_metadata"])
+    restarting = namedtuple("restarting", ["test", "test_group", "group_metadata"])
+    error = namedtuple("error", [])
+    stop = namedtuple("stop", [])
+
+
+RunnerManagerState = _RunnerManagerState()
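+
+# State machine sketch, as driven by TestRunnerManager.run below:
+#
+#   before_init  -> initializing -> running
+#   running      -> running | restarting | stop
+#   restarting   -> initializing
+#   initializing -> error (after more than max_restarts failures)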
+
+
+class TestRunnerManager(threading.Thread):
+    def __init__(self, suite_name, test_queue, test_source_cls, browser_cls, browser_kwargs,
+                 executor_cls, executor_kwargs, stop_flag, pause_after_test=False,
+                 pause_on_unexpected=False, restart_on_unexpected=True, debug_info=None):
+        """Thread that owns a single TestRunner process and any processes required
+        by the TestRunner (e.g. the Firefox binary).
+
+        TestRunnerManagers are responsible for launching the browser process and the
+        runner process, and for logging the test progress. The actual test running
+        is done by the TestRunner. In particular they:
+
+        * Start the binary of the program under test
+        * Start the TestRunner
+        * Tell the TestRunner to start a test, if any
+        * Log that the test started
+        * Log the test results
+        * Take any remedial action required e.g. restart crashed or hung
+          processes
+        """
+        self.suite_name = suite_name
+
+        self.test_source = test_source_cls(test_queue)
+
+        self.browser_cls = browser_cls
+        self.browser_kwargs = browser_kwargs
+
+        self.executor_cls = executor_cls
+        self.executor_kwargs = executor_kwargs
+
+        # Flags used to shut down this thread if we get a sigint
+        self.parent_stop_flag = stop_flag
+        self.child_stop_flag = multiprocessing.Event()
+
+        self.pause_after_test = pause_after_test
+        self.pause_on_unexpected = pause_on_unexpected
+        self.restart_on_unexpected = restart_on_unexpected
+        self.debug_info = debug_info
+
+        self.manager_number = next_manager_number()
+
+        self.command_queue = Queue()
+        self.remote_queue = Queue()
+
+        self.test_runner_proc = None
+
+        threading.Thread.__init__(self, name="Thread-TestrunnerManager-%i" % self.manager_number)
+        # This is started in the actual new thread
+        self.logger = None
+
+        self.unexpected_count = 0
+
+        # This may not really be what we want
+        self.daemon = True
+
+        self.max_restarts = 5
+
+        self.browser = None
+
+    def run(self):
+        """Main loop for the TestManager.
+
+        TestManagers generally receive commands from their
+        TestRunner updating them on the status of a test. They
+        may also have a stop flag set by the main thread indicating
+        that the manager should shut down the next time the event loop
+        spins."""
+        self.logger = structuredlog.StructuredLogger(self.suite_name)
+        with self.browser_cls(self.logger, **self.browser_kwargs) as browser:
+            self.browser = BrowserManager(self.logger,
+                                          browser,
+                                          self.command_queue,
+                                          no_timeout=self.debug_info is not None)
+            dispatch = {
+                RunnerManagerState.before_init: self.start_init,
+                RunnerManagerState.initializing: self.init,
+                RunnerManagerState.running: self.run_test,
+                RunnerManagerState.restarting: self.restart_runner
+            }
+
+            self.state = RunnerManagerState.before_init()
+            end_states = (RunnerManagerState.stop,
+                          RunnerManagerState.error)
+
+            try:
+                while not isinstance(self.state, end_states):
+                    f = dispatch.get(self.state.__class__)
+                    while f:
+                        self.logger.debug("Dispatch %s" % f.__name__)
+                        if self.should_stop():
+                            return
+                        new_state = f()
+                        if new_state is None:
+                            break
+                        self.state = new_state
+                        self.logger.debug("new state: %s" % self.state.__class__.__name__)
+                        if isinstance(self.state, end_states):
+                            return
+                        f = dispatch.get(self.state.__class__)
+
+                    new_state = None
+                    while new_state is None:
+                        new_state = self.wait_event()
+                        if self.should_stop():
+                            return
+                    self.state = new_state
+                    self.logger.debug("new state: %s" % self.state.__class__.__name__)
+            except Exception as e:
+                self.logger.error(traceback.format_exc())
+                raise
+            finally:
+                self.logger.debug("TestRunnerManager main loop terminating, starting cleanup")
+                clean = isinstance(self.state, RunnerManagerState.stop)
+                self.stop_runner(force=not clean)
+                self.teardown()
+        self.logger.debug("TestRunnerManager main loop terminated")
+
+    def wait_event(self):
+        dispatch = {
+            RunnerManagerState.before_init: {},
+            RunnerManagerState.initializing:
+            {
+                "init_succeeded": self.init_succeeded,
+                "init_failed": self.init_failed,
+            },
+            RunnerManagerState.running:
+            {
+                "test_ended": self.test_ended,
+                "wait_finished": self.wait_finished,
+            },
+            RunnerManagerState.restarting: {},
+            RunnerManagerState.error: {},
+            RunnerManagerState.stop: {},
+            None: {
+                "runner_teardown": self.runner_teardown,
+                "log": self.log,
+                "error": self.error
+            }
+        }
+        try:
+            command, data = self.command_queue.get(True, 1)
+        except IOError:
+            self.logger.error("Got IOError from poll")
+            return RunnerManagerState.restarting(self.state.test,
+                                                 self.state.test_group,
+                                                 self.state.group_metadata)
+        except Empty:
+            if (self.debug_info and self.debug_info.interactive and
+                self.browser.started and not self.browser.is_alive()):
+                self.logger.debug("Debugger exited")
+                return RunnerManagerState.stop()
+
+            if (isinstance(self.state, RunnerManagerState.running) and
+                not self.test_runner_proc.is_alive()):
+                if not self.command_queue.empty():
+                    # We got a new message so process that
+                    return
+
+                # If we got to here the runner presumably shut down
+                # unexpectedly
+                self.logger.info("Test runner process shut down")
+
+                if self.state.test is not None:
+                    # This could happen if the test runner crashed for some other
+                    # reason
+                    # Need to consider the unlikely case where one test causes the
+                    # runner process to repeatedly die
+                    self.logger.critical("Last test did not complete")
+                    return RunnerManagerState.error()
+                self.logger.warning("More tests found, but runner process died, restarting")
+                return RunnerManagerState.restarting(self.state.test,
+                                                     self.state.test_group,
+                                                     self.state.group_metadata)
+        else:
+            f = (dispatch.get(self.state.__class__, {}).get(command) or
+                 dispatch.get(None, {}).get(command))
+            if not f:
+                self.logger.warning("Got command %s in state %s" %
+                                    (command, self.state.__class__.__name__))
+                return
+            return f(*data)
+
+    def should_stop(self):
+        return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()
+
+    def start_init(self):
+        test, test_group, group_metadata = self.get_next_test()
+        if test is None:
+            return RunnerManagerState.stop()
+        else:
+            return RunnerManagerState.initializing(test, test_group, group_metadata, 0)
+
+    def init(self):
+        assert isinstance(self.state, RunnerManagerState.initializing)
+        if self.state.failure_count > self.max_restarts:
+            self.logger.error("Max restarts exceeded")
+            return RunnerManagerState.error()
+
+        self.browser.update_settings(self.state.test)
+
+        result = self.browser.init()
+        if result is Stop:
+            return RunnerManagerState.error()
+        elif not result:
+            return RunnerManagerState.initializing(self.state.test,
+                                                   self.state.test_group,
+                                                   self.state.group_metadata,
+                                                   self.state.failure_count + 1)
+        else:
+            self.executor_kwargs["group_metadata"] = self.state.group_metadata
+            self.start_test_runner()
+
+    def start_test_runner(self):
+        # Note that we need to be careful to start the browser before the
+        # test runner to ensure that any state set when the browser is started
+        # can be passed in to the test runner.
+        assert isinstance(self.state, RunnerManagerState.initializing)
+        assert self.command_queue is not None
+        assert self.remote_queue is not None
+        self.logger.info("Starting runner")
+        executor_browser_cls, executor_browser_kwargs = self.browser.browser.executor_browser()
+
+        args = (self.remote_queue,
+                self.command_queue,
+                self.executor_cls,
+                self.executor_kwargs,
+                executor_browser_cls,
+                executor_browser_kwargs,
+                self.child_stop_flag)
+        self.test_runner_proc = Process(target=start_runner,
+                                        args=args,
+                                        name="Thread-TestRunner-%i" % self.manager_number)
+        self.test_runner_proc.start()
+        self.logger.debug("Test runner started")
+        # Now we wait for either an init_succeeded event or an init_failed event
+
+    def init_succeeded(self):
+        assert isinstance(self.state, RunnerManagerState.initializing)
+        self.browser.after_init()
+        return RunnerManagerState.running(self.state.test,
+                                          self.state.test_group,
+                                          self.state.group_metadata)
+
+    def init_failed(self):
+        assert isinstance(self.state, RunnerManagerState.initializing)
+        self.browser.after_init()
+        self.stop_runner(force=True)
+        return RunnerManagerState.initializing(self.state.test,
+                                               self.state.test_group,
+                                               self.state.group_metadata,
+                                               self.state.failure_count + 1)
+
+    def get_next_test(self, test_group=None):
+        test = None
+        # Ensure group_metadata is bound even if a non-empty test_group was
+        # passed in
+        group_metadata = None
+        while test is None:
+            while test_group is None or len(test_group) == 0:
+                test_group, group_metadata = self.test_source.group()
+                if test_group is None:
+                    self.logger.info("No more tests")
+                    return None, None, None
+            test = test_group.popleft()
+        return test, test_group, group_metadata
+
+    def run_test(self):
+        assert isinstance(self.state, RunnerManagerState.running)
+        assert self.state.test is not None
+
+        if self.browser.update_settings(self.state.test):
+            self.logger.info("Restarting browser for new test environment")
+            return RunnerManagerState.restarting(self.state.test,
+                                                 self.state.test_group,
+                                                 self.state.group_metadata)
+
+        self.logger.test_start(self.state.test.id)
+        self.send_message("run_test", self.state.test)
+
+    def test_ended(self, test, results):
+        """Handle the end of a test.
+
+        Output the result of each subtest, and the result of the overall
+        harness to the logs.
+        """
+        assert isinstance(self.state, RunnerManagerState.running)
+        assert test == self.state.test
+        # Write the result of each subtest
+        file_result, test_results = results
+        subtest_unexpected = False
+        for result in test_results:
+            if test.disabled(result.name):
+                continue
+            expected = test.expected(result.name)
+            is_unexpected = expected != result.status
+
+            if is_unexpected:
+                self.unexpected_count += 1
+                self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
+                subtest_unexpected = True
+            self.logger.test_status(test.id,
+                                    result.name,
+                                    result.status,
+                                    message=result.message,
+                                    expected=expected,
+                                    stack=result.stack)
+
+        # TODO: consider changing result if there is a crash dump file
+
+        # Write the result of the test harness
+        expected = test.expected()
+        status = file_result.status if file_result.status != "EXTERNAL-TIMEOUT" else "TIMEOUT"
+
+        if file_result.status in ("TIMEOUT", "EXTERNAL-TIMEOUT"):
+            if self.browser.check_for_crashes():
+                status = "CRASH"
+
+        is_unexpected = expected != status
+        if is_unexpected:
+            self.unexpected_count += 1
+            self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
+        if status == "CRASH":
+            self.browser.log_crash(test.id)
+
+        self.logger.test_end(test.id,
+                             status,
+                             message=file_result.message,
+                             expected=expected,
+                             extra=file_result.extra)
+
+        restart_before_next = (test.restart_after or
+                               file_result.status in ("CRASH", "EXTERNAL-TIMEOUT") or
+                               ((subtest_unexpected or is_unexpected)
+                                and self.restart_on_unexpected))
+
+        if (self.pause_after_test or
+            (self.pause_on_unexpected and (subtest_unexpected or is_unexpected))):
+            self.logger.info("Pausing until the browser exits")
+            self.send_message("wait")
+        else:
+            return self.after_test_end(restart_before_next)
+
+    def wait_finished(self):
+        assert isinstance(self.state, RunnerManagerState.running)
+        # The browser should be stopped already, but this ensures we do any post-stop
+        # processing
+        self.logger.debug("Wait finished")
+
+        return self.after_test_end(True)
+
+    def after_test_end(self, restart):
+        assert isinstance(self.state, RunnerManagerState.running)
+        test, test_group, group_metadata = self.get_next_test()
+        if test is None:
+            return RunnerManagerState.stop()
+        if test_group != self.state.test_group:
+            # We are starting a new group of tests, so force a restart
+            restart = True
+        if restart:
+            return RunnerManagerState.restarting(test, test_group, group_metadata)
+        else:
+            return RunnerManagerState.running(test, test_group, group_metadata)
+
+    def restart_runner(self):
+        """Stop and restart the TestRunner"""
+        assert isinstance(self.state, RunnerManagerState.restarting)
+        self.stop_runner()
+        return RunnerManagerState.initializing(self.state.test, self.state.test_group, self.state.group_metadata, 0)
+
+    def log(self, action, kwargs):
+        getattr(self.logger, action)(**kwargs)
+
+    def error(self, message):
+        self.logger.error(message)
+        self.restart_runner()
+
+    def stop_runner(self, force=False):
+        """Stop the TestRunner and the browser binary."""
+        if self.test_runner_proc is None:
+            return
+
+        if self.test_runner_proc.is_alive():
+            self.send_message("stop")
+        try:
+            self.browser.stop(force=force)
+            self.ensure_runner_stopped()
+        finally:
+            self.cleanup()
+
+    def teardown(self):
+        self.logger.debug("teardown in testrunnermanager")
+        self.test_runner_proc = None
+        self.command_queue.close()
+        self.remote_queue.close()
+        self.command_queue = None
+        self.remote_queue = None
+
+    def ensure_runner_stopped(self):
+        self.logger.debug("ensure_runner_stopped")
+        if self.test_runner_proc is None:
+            return
+
+        self.logger.debug("waiting for runner process to end")
+        self.test_runner_proc.join(10)
+        self.logger.debug("After join")
+        if self.test_runner_proc.is_alive():
+            # This might leak a file handle from the queue
+            self.logger.warning("Forcibly terminating runner process")
+            self.test_runner_proc.terminate()
+            self.test_runner_proc.join(10)
+        else:
+            self.logger.debug("Testrunner exited with code %i" % self.test_runner_proc.exitcode)
+
+    def runner_teardown(self):
+        self.ensure_runner_stopped()
+        return RunnerManagerState.stop()
+
+    def send_message(self, command, *args):
+        self.remote_queue.put((command, args))
+
+    def cleanup(self):
+        self.logger.debug("TestManager cleanup")
+        if self.browser:
+            self.browser.cleanup()
+        while True:
+            try:
+                self.logger.warning(" ".join(map(repr, self.command_queue.get_nowait())))
+            except Empty:
+                break
+
+
+def make_test_queue(tests, test_source_cls, **test_source_kwargs):
+    queue = test_source_cls.make_queue(tests, **test_source_kwargs)
+
+    # There is a race condition that means sometimes we continue
+    # before the tests have been written to the underlying pipe.
+    # Polling the pipe for data here avoids that
+    queue._reader.poll(10)
+    assert not queue.empty()
+    return queue
+
+
+class ManagerGroup(object):
+    def __init__(self, suite_name, size, test_source_cls, test_source_kwargs,
+                 browser_cls, browser_kwargs,
+                 executor_cls, executor_kwargs,
+                 pause_after_test=False,
+                 pause_on_unexpected=False,
+                 restart_on_unexpected=True,
+                 debug_info=None):
+        """Main thread object that owns all the TestManager threads."""
+        self.suite_name = suite_name
+        self.size = size
+        self.test_source_cls = test_source_cls
+        self.test_source_kwargs = test_source_kwargs
+        self.browser_cls = browser_cls
+        self.browser_kwargs = browser_kwargs
+        self.executor_cls = executor_cls
+        self.executor_kwargs = executor_kwargs
+        self.pause_after_test = pause_after_test
+        self.pause_on_unexpected = pause_on_unexpected
+        self.restart_on_unexpected = restart_on_unexpected
+        self.debug_info = debug_info
+
+        self.pool = set()
+        # Event that is polled by threads so that they can gracefully exit in the face
+        # of sigint
+        self.stop_flag = threading.Event()
+        self.logger = structuredlog.StructuredLogger(suite_name)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.stop()
+
+    def run(self, test_type, tests):
+        """Start all managers in the group"""
+        self.logger.debug("Using %i processes" % self.size)
+        type_tests = tests[test_type]
+        if not type_tests:
+            self.logger.info("No %s tests to run" % test_type)
+            return
+
+        test_queue = make_test_queue(type_tests, self.test_source_cls, **self.test_source_kwargs)
+
+        for _ in range(self.size):
+            manager = TestRunnerManager(self.suite_name,
+                                        test_queue,
+                                        self.test_source_cls,
+                                        self.browser_cls,
+                                        self.browser_kwargs,
+                                        self.executor_cls,
+                                        self.executor_kwargs,
+                                        self.stop_flag,
+                                        self.pause_after_test,
+                                        self.pause_on_unexpected,
+                                        self.restart_on_unexpected,
+                                        self.debug_info)
+            manager.start()
+            self.pool.add(manager)
+        self.wait()
+
+    def is_alive(self):
+        """Boolean indicating whether any manager in the group is still alive"""
+        return any(manager.is_alive() for manager in self.pool)
+
+    def wait(self):
+        """Wait for all the managers in the group to finish"""
+        for item in self.pool:
+            item.join()
+
+    def stop(self):
+        """Set the stop flag so that all managers in the group stop as soon
+        as possible"""
+        self.stop_flag.set()
+        self.logger.debug("Stop flag set in ManagerGroup")
+
+    def unexpected_count(self):
+        return sum(item.unexpected_count for item in self.pool)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/__init__.py
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_chunker.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_chunker.py
new file mode 100644
index 0000000..062b687
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_chunker.py
@@ -0,0 +1,91 @@
+import unittest
+import sys
+from os.path import join, dirname
+from mozlog import structured
+
+sys.path.insert(0, join(dirname(__file__), "..", ".."))
+
+from wptrunner.testloader import EqualTimeChunker
+
+structured.set_default_logger(structured.structuredlog.StructuredLogger("TestChunker"))
+
+class MockTest(object):
+    default_timeout = 10
+
+    def __init__(self, id, timeout=10):
+        self.id = id
+        self.item_type = "testharness"
+        self.timeout = timeout
+
+
+def make_mock_manifest(*items):
+    rv = []
+    for test_type, dir_path, num_tests in items:
+        for i in range(num_tests):
+            rv.append((test_type,
+                       dir_path + "/%i.test" % i,
+                       set([MockTest(i)])))
+    return rv
+
+
+class TestEqualTimeChunker(unittest.TestCase):
+
+    def test_include_all(self):
+        tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10),
+                                   ("test", "c", 10))
+
+        chunk_1 = list(EqualTimeChunker(3, 1)(tests))
+        chunk_2 = list(EqualTimeChunker(3, 2)(tests))
+        chunk_3 = list(EqualTimeChunker(3, 3)(tests))
+
+        self.assertEquals(tests[:10], chunk_1)
+        self.assertEquals(tests[10:20], chunk_2)
+        self.assertEquals(tests[20:], chunk_3)
+
+    def test_include_all_1(self):
+        tests = make_mock_manifest(("test", "a", 5), ("test", "a/b", 5),
+                                   ("test", "c", 10), ("test", "d", 10))
+
+        chunk_1 = list(EqualTimeChunker(3, 1)(tests))
+        chunk_2 = list(EqualTimeChunker(3, 2)(tests))
+        chunk_3 = list(EqualTimeChunker(3, 3)(tests))
+
+        self.assertEquals(tests[:10], chunk_1)
+        self.assertEquals(tests[10:20], chunk_2)
+        self.assertEquals(tests[20:], chunk_3)
+
+    def test_long(self):
+        tests = make_mock_manifest(("test", "a", 100), ("test", "a/b", 1),
+                                   ("test", "c", 1))
+
+        chunk_1 = list(EqualTimeChunker(3, 1)(tests))
+        chunk_2 = list(EqualTimeChunker(3, 2)(tests))
+        chunk_3 = list(EqualTimeChunker(3, 3)(tests))
+
+        self.assertEquals(tests[:100], chunk_1)
+        self.assertEquals(tests[100:101], chunk_2)
+        self.assertEquals(tests[101:102], chunk_3)
+
+    def test_long_1(self):
+        tests = make_mock_manifest(("test", "a", 1), ("test", "a/b", 100),
+                                   ("test", "c", 1))
+
+        chunk_1 = list(EqualTimeChunker(3, 1)(tests))
+        chunk_2 = list(EqualTimeChunker(3, 2)(tests))
+        chunk_3 = list(EqualTimeChunker(3, 3)(tests))
+
+        self.assertEquals(tests[:1], chunk_1)
+        self.assertEquals(tests[1:101], chunk_2)
+        self.assertEquals(tests[101:102], chunk_3)
+
+    def test_too_few_dirs(self):
+        with self.assertRaises(ValueError):
+            tests = make_mock_manifest(("test", "a", 1), ("test", "a/b", 100),
+                                       ("test", "c", 1))
+            list(EqualTimeChunker(4, 1)(tests))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_hosts.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_hosts.py
new file mode 100644
index 0000000..808b816
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_hosts.py
@@ -0,0 +1,56 @@
+import unittest
+import sys
+from os.path import join, dirname
+from cStringIO import StringIO
+
+sys.path.insert(0, join(dirname(__file__), "..", ".."))
+
+from wptrunner import hosts
+
+
+class HostsTest(unittest.TestCase):
+    def do_test(self, input, expected):
+        host_file = hosts.HostsFile.from_file(StringIO(input))
+        self.assertEquals(host_file.to_string(), expected)
+
+    def test_simple(self):
+        self.do_test("""127.0.0.1    \tlocalhost  alias # comment
+# Another comment""",
+                     """127.0.0.1 localhost alias # comment
+# Another comment
+""")
+
+    def test_blank_lines(self):
+        self.do_test("""127.0.0.1    \tlocalhost  alias # comment
+
+\r
+    \t
+# Another comment""",
+                     """127.0.0.1 localhost alias # comment
+# Another comment
+""")
+
+    def test_whitespace(self):
+        self.do_test("""    \t127.0.0.1    \tlocalhost  alias # comment     \r
+    \t# Another comment""",
+                     """127.0.0.1 localhost alias # comment
+# Another comment
+""")
+
+    def test_alignment(self):
+        self.do_test("""127.0.0.1    \tlocalhost  alias
+192.168.1.1 another_host    another_alias
+""","""127.0.0.1   localhost    alias
+192.168.1.1 another_host another_alias
+"""
+)
+
+    def test_multiple_same_name(self):
+        # The semantics are that we overwrite earlier entries with the same name
+        self.do_test("""127.0.0.1    \tlocalhost  alias
+192.168.1.1 localhost    another_alias""","""192.168.1.1 localhost another_alias
+"""
+)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_testloader.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_testloader.py
new file mode 100644
index 0000000..0d967b0
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_testloader.py
@@ -0,0 +1,29 @@
+from __future__ import unicode_literals
+
+import os
+import sys
+import tempfile
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
+
+from mozlog import structured
+from wptrunner.testloader import TestFilter as Filter
+from .test_chunker import make_mock_manifest
+
+structured.set_default_logger(structured.structuredlog.StructuredLogger("TestLoader"))
+
+include_ini = """\
+skip: true
+[test_\u53F0]
+  skip: false
+"""
+
+def test_filter_unicode():
+    tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10),
+                               ("test", "c", 10))
+
+    with tempfile.NamedTemporaryFile("wb", suffix=".ini") as f:
+        f.write(include_ini.encode('utf-8'))
+        f.flush()
+
+        Filter(manifest_path=f.name, test_manifests=tests)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_update.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_update.py
new file mode 100644
index 0000000..e5eb4cf
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/tests/test_update.py
@@ -0,0 +1,328 @@
+import unittest
+import StringIO
+
+import pytest
+
+from .. import metadata, manifestupdate
+from mozlog import structuredlog, handlers, formatters
+
+
+class TestExpectedUpdater(unittest.TestCase):
+    def create_manifest(self, data, test_path="path/to/test.ini"):
+        f = StringIO.StringIO(data)
+        return manifestupdate.compile(f, test_path)
+
+    def create_updater(self, data, **kwargs):
+        expected_tree = {}
+        id_path_map = {}
+        for test_path, test_ids, manifest_str in data:
+            if isinstance(test_ids, (str, unicode)):
+                test_ids = [test_ids]
+            expected_tree[test_path] = self.create_manifest(manifest_str, test_path)
+            for test_id in test_ids:
+                id_path_map[test_id] = test_path
+
+        return metadata.ExpectedUpdater(expected_tree, id_path_map, **kwargs)
+
+    def create_log(self, *args, **kwargs):
+        logger = structuredlog.StructuredLogger("expected_test")
+        data = StringIO.StringIO()
+        handler = handlers.StreamHandler(data, formatters.JSONFormatter())
+        logger.add_handler(handler)
+
+        log_entries = ([("suite_start", {"tests": [], "run_info": kwargs.get("run_info", {})})] +
+                       list(args) +
+                       [("suite_end", {})])
+
+        for item in log_entries:
+            action, kwargs = item
+            getattr(logger, action)(**kwargs)
+        logger.remove_handler(handler)
+        data.seek(0)
+        return data
+
+    def coalesce_results(self, trees):
+        for tree in trees:
+            for test in tree.iterchildren():
+                for subtest in test.iterchildren():
+                    subtest.coalesce_expected()
+                test.coalesce_expected()
+
+    @pytest.mark.xfail
+    def test_update_0(self):
+        prev_data = [("path/to/test.htm.ini", ["/path/to/test.htm"], """[test.htm]
+  type: testharness
+  [test1]
+    expected: FAIL""")]
+
+        new_data = self.create_log(("test_start", {"test": "/path/to/test.htm"}),
+                                   ("test_status", {"test": "/path/to/test.htm",
+                                                    "subtest": "test1",
+                                                    "status": "PASS",
+                                                    "expected": "FAIL"}),
+                                   ("test_end", {"test": "/path/to/test.htm",
+                                                 "status": "OK"}))
+        updater = self.create_updater(prev_data)
+        updater.update_from_log(new_data)
+
+        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
+        self.coalesce_results([new_manifest])
+        self.assertTrue(new_manifest.is_empty)
+
+    @pytest.mark.xfail
+    def test_update_1(self):
+        test_id = "/path/to/test.htm"
+        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
+  type: testharness
+  [test1]
+    expected: ERROR""")]
+
+        new_data = self.create_log(("test_start", {"test": test_id}),
+                                   ("test_status", {"test": test_id,
+                                                    "subtest": "test1",
+                                                    "status": "FAIL",
+                                                    "expected": "ERROR"}),
+                                   ("test_end", {"test": test_id,
+                                                 "status": "OK"}))
+        updater = self.create_updater(prev_data)
+        updater.update_from_log(new_data)
+
+        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
+        self.coalesce_results([new_manifest])
+        self.assertFalse(new_manifest.is_empty)
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get("expected"), "FAIL")
+
+    @pytest.mark.xfail
+    def test_new_subtest(self):
+        test_id = "/path/to/test.htm"
+        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
+  type: testharness
+  [test1]
+    expected: FAIL""")]
+
+        new_data = self.create_log(("test_start", {"test": test_id}),
+                                   ("test_status", {"test": test_id,
+                                                    "subtest": "test1",
+                                                    "status": "FAIL",
+                                                    "expected": "FAIL"}),
+                                   ("test_status", {"test": test_id,
+                                                    "subtest": "test2",
+                                                    "status": "FAIL",
+                                                    "expected": "PASS"}),
+                                   ("test_end", {"test": test_id,
+                                                 "status": "OK"}))
+        updater = self.create_updater(prev_data)
+        updater.update_from_log(new_data)
+
+        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
+        self.coalesce_results([new_manifest])
+        self.assertFalse(new_manifest.is_empty)
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get("expected"), "FAIL")
+        self.assertEquals(new_manifest.get_test(test_id).children[1].get("expected"), "FAIL")
+
+    @pytest.mark.xfail
+    def test_update_multiple_0(self):
+        test_id = "/path/to/test.htm"
+        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
+  type: testharness
+  [test1]
+    expected: FAIL""")]
+
+        new_data_0 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "FAIL",
+                                                      "expected": "FAIL"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": False, "os": "osx"})
+
+        new_data_1 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "TIMEOUT",
+                                                      "expected": "FAIL"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": False, "os": "linux"})
+        updater = self.create_updater(prev_data)
+
+        updater.update_from_log(new_data_0)
+        updater.update_from_log(new_data_1)
+
+        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
+
+        self.coalesce_results([new_manifest])
+
+        self.assertFalse(new_manifest.is_empty)
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": False, "os": "osx"}), "FAIL")
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": False, "os": "linux"}), "TIMEOUT")
+
+    @pytest.mark.xfail
+    def test_update_multiple_1(self):
+        test_id = "/path/to/test.htm"
+        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
+  type: testharness
+  [test1]
+    expected: FAIL""")]
+
+        new_data_0 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "FAIL",
+                                                      "expected": "FAIL"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": False, "os": "osx"})
+
+        new_data_1 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "TIMEOUT",
+                                                      "expected": "FAIL"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": False, "os": "linux"})
+        updater = self.create_updater(prev_data)
+
+        updater.update_from_log(new_data_0)
+        updater.update_from_log(new_data_1)
+
+        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
+
+        self.coalesce_results([new_manifest])
+
+        self.assertFalse(new_manifest.is_empty)
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": False, "os": "osx"}), "FAIL")
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": False, "os": "linux"}), "TIMEOUT")
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": False, "os": "windows"}), "FAIL")
+
+    @pytest.mark.xfail
+    def test_update_multiple_2(self):
+        test_id = "/path/to/test.htm"
+        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
+  type: testharness
+  [test1]
+    expected: FAIL""")]
+
+        new_data_0 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "FAIL",
+                                                      "expected": "FAIL"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": False, "os": "osx"})
+
+        new_data_1 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "TIMEOUT",
+                                                      "expected": "FAIL"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": True, "os": "osx"})
+        updater = self.create_updater(prev_data)
+
+        updater.update_from_log(new_data_0)
+        updater.update_from_log(new_data_1)
+
+        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
+
+        self.coalesce_results([new_manifest])
+
+        self.assertFalse(new_manifest.is_empty)
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": False, "os": "osx"}), "FAIL")
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": True, "os": "osx"}), "TIMEOUT")
+
+    @pytest.mark.xfail
+    def test_update_multiple_3(self):
+        test_id = "/path/to/test.htm"
+        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
+  type: testharness
+  [test1]
+    expected:
+      if debug: FAIL
+      if not debug and os == "osx": TIMEOUT""")]
+
+        new_data_0 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "FAIL",
+                                                      "expected": "FAIL"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": False, "os": "osx"})
+
+        new_data_1 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "TIMEOUT",
+                                                      "expected": "FAIL"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": True, "os": "osx"})
+        updater = self.create_updater(prev_data)
+
+        updater.update_from_log(new_data_0)
+        updater.update_from_log(new_data_1)
+
+        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
+
+        self.coalesce_results([new_manifest])
+
+        self.assertFalse(new_manifest.is_empty)
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": False, "os": "osx"}), "FAIL")
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": True, "os": "osx"}), "TIMEOUT")
+
+    @pytest.mark.xfail
+    def test_update_ignore_existing(self):
+        test_id = "/path/to/test.htm"
+        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
+  type: testharness
+  [test1]
+    expected:
+      if debug: TIMEOUT
+      if not debug and os == "osx": NOTRUN""")]
+
+        new_data_0 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "FAIL",
+                                                      "expected": "PASS"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": False, "os": "linux"})
+
+        new_data_1 = self.create_log(("test_start", {"test": test_id}),
+                                     ("test_status", {"test": test_id,
+                                                      "subtest": "test1",
+                                                      "status": "FAIL",
+                                                      "expected": "PASS"}),
+                                     ("test_end", {"test": test_id,
+                                                   "status": "OK"}),
+                                     run_info={"debug": True, "os": "windows"})
+        updater = self.create_updater(prev_data, ignore_existing=True)
+
+        updater.update_from_log(new_data_0)
+        updater.update_from_log(new_data_1)
+
+        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
+
+        self.coalesce_results([new_manifest])
+
+        self.assertFalse(new_manifest.is_empty)
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": True, "os": "osx"}), "FAIL")
+        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
+            "expected", {"debug": False, "os": "osx"}), "FAIL")
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/__init__.py
new file mode 100644
index 0000000..497cb34
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/__init__.py
@@ -0,0 +1,47 @@
+import os
+import sys
+
+from mozlog.structured import structuredlog, commandline
+
+from .. import wptcommandline
+
+from update import WPTUpdate
+
+def remove_logging_args(args):
+    """Take logging args out of the dictionary of command line arguments so
+    they are not passed in as kwargs to the update code. This is particularly
+    necessary here because the arguments are often of type file, which cannot
+    be serialized.
+
+    :param args: Dictionary of command line arguments.
+    """
+    for name in args.keys():
+        if name.startswith("log_"):
+            args.pop(name)
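+
+# For illustration only (the argument names are hypothetical): given
+#
+#     args = {"log_raw": [<open file>], "sync": True}
+#
+# remove_logging_args(args) leaves {"sync": True}, dropping the
+# unpicklable log_* entries before the dict is passed as kwargs.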
+
+
+def setup_logging(args, defaults):
+    """Use the command line arguments to set up the logger.
+
+    :param args: Dictionary of command line arguments.
+    :param defaults: Dictionary of {formatter_name: stream} to use if
+                     no command line logging is specified"""
+    logger = commandline.setup_logging("web-platform-tests-update", args, defaults)
+
+    remove_logging_args(args)
+
+    return logger
+
+
+def run_update(logger, **kwargs):
+    updater = WPTUpdate(logger, **kwargs)
+    return updater.run()
+
+
+def main():
+    args = wptcommandline.parse_args_update()
+    logger = setup_logging(args, {"mach": sys.stdout})
+    assert structuredlog.get_default_logger() is not None
+    success = run_update(logger, **args)
+    sys.exit(0 if success else 1)
+
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/base.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/base.py
new file mode 100644
index 0000000..547808e
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/base.py
@@ -0,0 +1,65 @@
+exit_unclean = object()
+exit_clean = object()
+
+
+class Step(object):
+    """Base class for state-creating steps."""
+
+    provides = []
+
+    def __init__(self, logger):
+        self.logger = logger
+
+    def run(self, step_index, state):
+        """Run this step.
+
+        When a Step is run, the current state is checked to see if the
+        state from this step has already been created. If it has, the
+        restore() method is invoked. Otherwise the create() method is
+        invoked with the state object; it is expected to add items with
+        all the keys in __class__.provides to the state object.
+        """
+
+        name = self.__class__.__name__
+
+        try:
+            stored_step = state.steps[step_index]
+        except IndexError:
+            stored_step = None
+
+        if stored_step == name:
+            self.restore(state)
+        elif stored_step is None:
+            self.create(state)
+            assert set(self.provides).issubset(set(state.keys()))
+            state.steps = state.steps + [name]
+        else:
+            raise ValueError("Expected a %s step, got a %s step" % (name, stored_step))
+
+    def create(self, state):
+        raise NotImplementedError
+
+    def restore(self, state):
+        self.logger.debug("Step %s using stored state" % (self.__class__.__name__,))
+        for key in self.provides:
+            assert key in state
+
+
+class StepRunner(object):
+    """Runs a specified series of Steps with a common State."""
+
+    steps = []
+
+    def __init__(self, logger, state):
+        self.state = state
+        self.logger = logger
+        if "steps" not in state:
+            state.steps = []
+
+    def run(self):
+        rv = None
+        for step_index, step in enumerate(self.steps):
+            self.logger.debug("Starting step %s" % step.__name__)
+            rv = step(self.logger).run(step_index, self.state)
+            if rv in (exit_clean, exit_unclean):
+                break
+
+        return rv
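+
+
+# A minimal sketch (not part of wptrunner) of how Step and StepRunner
+# compose; the step name and state key below are hypothetical:
+#
+#     class ComputeAnswer(Step):
+#         provides = ["answer"]
+#
+#         def create(self, state):
+#             state.answer = 42
+#
+#     class ExampleRunner(StepRunner):
+#         steps = [ComputeAnswer]
+#
+#     ExampleRunner(logger, state).run()
+#
+# If the same State is re-run, run() sees the stored step name and calls
+# restore() instead of create(), so completed steps are not repeated.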
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/metadata.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/metadata.py
new file mode 100644
index 0000000..5f6925e
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/metadata.py
@@ -0,0 +1,71 @@
+import os
+
+from .. import metadata, products
+
+from base import Step, StepRunner
+
+class GetUpdatePropertyList(Step):
+    provides = ["property_order", "boolean_properties"]
+
+    def create(self, state):
+        property_order, boolean_properties = products.load_product_update(
+            state.config, state.product)
+        state.property_order = property_order
+        state.boolean_properties = boolean_properties
+
+
+class UpdateExpected(Step):
+    """Do the metadata update on the local checkout"""
+
+    provides = ["needs_human"]
+
+    def create(self, state):
+        if state.sync_tree is not None:
+            sync_root = state.sync_tree.root
+        else:
+            sync_root = None
+
+        state.needs_human = metadata.update_expected(state.paths,
+                                                     state.serve_root,
+                                                     state.run_log,
+                                                     rev_old=None,
+                                                     ignore_existing=state.ignore_existing,
+                                                     sync_root=sync_root,
+                                                     property_order=state.property_order,
+                                                     boolean_properties=state.boolean_properties)
+
+
+class CreateMetadataPatch(Step):
+    """Create a patch/commit for the metadata checkout"""
+
+    def create(self, state):
+        if not state.patch:
+            return
+
+        local_tree = state.local_tree
+        sync_tree = state.sync_tree
+
+        if sync_tree is not None:
+            name = "web-platform-tests_update_%s_metadata" % sync_tree.rev
+            message = "Update %s expected data to revision %s" % (state.suite_name, sync_tree.rev)
+        else:
+            name = "web-platform-tests_update_metadata"
+            message = "Update %s expected data" % state.suite_name
+
+        local_tree.create_patch(name, message)
+
+        if not local_tree.is_clean:
+            metadata_paths = [manifest_path["metadata_path"]
+                              for manifest_path in state.paths.itervalues()]
+            for path in metadata_paths:
+                local_tree.add_new(os.path.relpath(path, local_tree.root))
+            local_tree.update_patch(include=metadata_paths)
+            local_tree.commit_patch()
+
+
+class MetadataUpdateRunner(StepRunner):
+    """(Sub)Runner for updating metadata"""
+    steps = [GetUpdatePropertyList,
+             UpdateExpected,
+             CreateMetadataPatch]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/state.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/state.py
new file mode 100644
index 0000000..9454414
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/state.py
@@ -0,0 +1,133 @@
+import os
+import cPickle as pickle
+
+here = os.path.abspath(os.path.split(__file__)[0])
+
+class State(object):
+    filename = os.path.join(here, ".wpt-update.lock")
+
+    def __new__(cls, logger):
+        rv = cls.load(logger)
+        if rv is not None:
+            logger.debug("Existing state found")
+            return rv
+
+        logger.debug("No existing state found")
+        return object.__new__(cls)
+
+    def __init__(self, logger):
+        """Object containing state variables created when running Steps.
+
+        On write the state is serialized to disk, such that it can be restored in
+        the event that the program is interrupted before all steps are complete.
+        Note that this only works well if the values are immutable; mutating an
+        existing value will not cause the data to be serialized.
+
+        Variables are set and get as attributes e.g. state_obj.spam = "eggs".
+
+        :param logger: Logger used to report state loading and changes.
+        """
+
+        if hasattr(self, "_data"):
+            return
+
+        self._data = [{}]
+        self._logger = logger
+        self._index = 0
+
+    def __getstate__(self):
+        rv = self.__dict__.copy()
+        del rv["_logger"]
+        return rv
+
+    @classmethod
+    def load(cls, logger):
+        """Load saved state from a file"""
+        try:
+            with open(cls.filename) as f:
+                try:
+                    rv = pickle.load(f)
+                    logger.debug("Loading data %r" % (rv._data,))
+                    rv._logger = logger
+                    rv._index = 0
+                    return rv
+                except EOFError:
+                    logger.warning("Found empty state file")
+        except IOError:
+            logger.debug("IOError loading stored state")
+
+    def push(self, init_values):
+        """Push a new clean state dictionary
+
+        :param init_values: List of variable names in the current state dict to copy
+                            into the new state dict."""
+
+        return StateContext(self, init_values)
+
+    def save(self):
+        """Write the state to disk"""
+        with open(self.filename, "w") as f:
+            pickle.dump(self, f)
+
+    def is_empty(self):
+        return len(self._data) == 1 and self._data[0] == {}
+
+    def clear(self):
+        """Remove all state and delete the stored copy."""
+        try:
+            os.unlink(self.filename)
+        except OSError:
+            pass
+        self._data = [{}]
+
+    def __setattr__(self, key, value):
+        if key.startswith("_"):
+            object.__setattr__(self, key, value)
+        else:
+            self._data[self._index][key] = value
+            self.save()
+
+    def __getattr__(self, key):
+        if key.startswith("_"):
+            raise AttributeError
+        try:
+            return self._data[self._index][key]
+        except KeyError:
+            raise AttributeError
+
+    def __contains__(self, key):
+        return key in self._data[self._index]
+
+    def update(self, items):
+        """Add a dictionary of {name: value} pairs to the state"""
+        self._data[self._index].update(items)
+        self.save()
+
+    def keys(self):
+        return self._data[self._index].keys()
+
+class StateContext(object):
+    def __init__(self, state, init_values):
+        self.state = state
+        self.init_values = init_values
+
+    def __enter__(self):
+        if len(self.state._data) == self.state._index + 1:
+            # This is the case where there is no stored state
+            new_state = {}
+            for key in self.init_values:
+                new_state[key] = self.state._data[self.state._index][key]
+            self.state._data.append(new_state)
+        self.state._index += 1
+        self.state._logger.debug("Incremented index to %s" % self.state._index)
+
+    def __exit__(self, *args, **kwargs):
+        if len(self.state._data) > 1:
+            assert self.state._index == len(self.state._data) - 1
+            self.state._data.pop()
+            self.state._index -= 1
+            self.state._logger.debug("Decremented index to %s" % self.state._index)
+            assert self.state._index >= 0
+        else:
+            raise ValueError("Tried to pop the top state")
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/sync.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/sync.py
new file mode 100644
index 0000000..b201fe7
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/sync.py
@@ -0,0 +1,197 @@
+import fnmatch
+import os
+import re
+import shutil
+import sys
+import uuid
+
+from .. import testloader
+
+from base import Step, StepRunner
+from tree import Commit
+
+here = os.path.abspath(os.path.split(__file__)[0])
+
+bsd_license = """W3C 3-clause BSD License
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of works must retain the original copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the original copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+
+* Neither the name of the W3C nor the names of its contributors may be
+  used to endorse or promote products derived from this work without
+  specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+"""
+
+
+def copy_wpt_tree(tree, dest, excludes=None, includes=None):
+    """Copy the working copy of a Tree to a destination directory.
+
+    :param tree: The Tree to copy.
+    :param dest: The destination directory.
+    :param excludes: List of fnmatch patterns of paths to skip.
+    :param includes: List of fnmatch patterns of paths to copy even if
+                     they match an exclude pattern."""
+    if os.path.exists(dest):
+        assert os.path.isdir(dest)
+        shutil.rmtree(dest)
+
+    os.mkdir(dest)
+
+    if excludes is None:
+        excludes = []
+
+    excludes = [re.compile(fnmatch.translate(item)) for item in excludes]
+
+    if includes is None:
+        includes = []
+
+    includes = [re.compile(fnmatch.translate(item)) for item in includes]
+
+    for tree_path in tree.paths():
+        if (any(item.match(tree_path) for item in excludes) and
+            not any(item.match(tree_path) for item in includes)):
+            continue
+
+        source_path = os.path.join(tree.root, tree_path)
+        dest_path = os.path.join(dest, tree_path)
+
+        dest_dir = os.path.split(dest_path)[0]
+        if not os.path.isdir(source_path):
+            if not os.path.exists(dest_dir):
+                os.makedirs(dest_dir)
+            shutil.copy2(source_path, dest_path)
+
+    for source, destination in [("testharness_runner.html", ""),
+                                ("testharnessreport.js", "resources/")]:
+        source_path = os.path.join(here, os.pardir, source)
+        dest_path = os.path.join(dest, destination, os.path.split(source)[1])
+        shutil.copy2(source_path, dest_path)
+
+    add_license(dest)
+
+
+def add_license(dest):
+    """Write the bsd license string to a LICENSE file.
+
+    :param dest: Directory in which to place the LICENSE file."""
+    with open(os.path.join(dest, "LICENSE"), "w") as f:
+        f.write(bsd_license)
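+
+# A hedged sketch of calling copy_wpt_tree directly (the destination and
+# fnmatch patterns here are hypothetical; CopyWorkTree below shows the
+# real call site):
+#
+#     copy_wpt_tree(sync_tree, "/path/to/tests",
+#                   excludes=["*.py", "docs/*"],
+#                   includes=["docs/keep/*"])
+#
+# A path matching an exclude pattern is still copied if it also matches
+# an include pattern.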
+
+
+class UpdateCheckout(Step):
+    """Pull changes from upstream into the local sync tree."""
+
+    provides = ["local_branch"]
+
+    def create(self, state):
+        sync_tree = state.sync_tree
+        state.local_branch = uuid.uuid4().hex
+        sync_tree.update(state.sync["remote_url"],
+                         state.sync["branch"],
+                         state.local_branch)
+        sync_path = os.path.abspath(sync_tree.root)
+        if sync_path not in sys.path:
+            from update import setup_paths
+            setup_paths(sync_path)
+
+    def restore(self, state):
+        assert os.path.abspath(state.sync_tree.root) in sys.path
+        Step.restore(self, state)
+
+
+class GetSyncTargetCommit(Step):
+    """Find the commit that we will sync to."""
+
+    provides = ["sync_commit"]
+
+    def create(self, state):
+        if state.target_rev is None:
+            # Use upstream branch HEAD as the base commit
+            state.sync_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
+                                                                state.sync["branch"])
+        else:
+            state.sync_commit = Commit(state.sync_tree, state.target_rev)
+
+        state.sync_tree.checkout(state.sync_commit.sha1, state.local_branch, force=True)
+        self.logger.debug("New base commit is %s" % state.sync_commit.sha1)
+
+
+class LoadManifest(Step):
+    """Load the test manifest"""
+
+    provides = ["manifest_path", "test_manifest"]
+
+    def create(self, state):
+        from manifest import manifest
+        state.manifest_path = os.path.join(state.metadata_path, "MANIFEST.json")
+        state.test_manifest = manifest.Manifest("/")
+
+
+class UpdateManifest(Step):
+    """Update the manifest to match the tests in the sync tree checkout"""
+
+    def create(self, state):
+        from manifest import manifest, update
+        update.update(state.sync["path"], state.test_manifest)
+        manifest.write(state.test_manifest, state.manifest_path)
+
+
+class CopyWorkTree(Step):
+    """Copy the sync tree over to the destination in the local tree"""
+
+    def create(self, state):
+        copy_wpt_tree(state.sync_tree,
+                      state.tests_path,
+                      excludes=state.path_excludes,
+                      includes=state.path_includes)
+
+
+class CreateSyncPatch(Step):
+    """Add the updated test files to a commit/patch in the local tree."""
+
+    def create(self, state):
+        if not state.patch:
+            return
+
+        local_tree = state.local_tree
+        sync_tree = state.sync_tree
+
+        local_tree.create_patch("web-platform-tests_update_%s" % sync_tree.rev,
+                                "Update %s to revision %s" % (state.suite_name, sync_tree.rev))
+        local_tree.add_new(os.path.relpath(state.tests_path,
+                                           local_tree.root))
+        updated = local_tree.update_patch(include=[state.tests_path,
+                                                   state.metadata_path])
+        local_tree.commit_patch()
+
+        if not updated:
+            self.logger.info("Nothing to sync")
+
+
+class SyncFromUpstreamRunner(StepRunner):
+    """(Sub)Runner for doing an upstream sync"""
+    steps = [UpdateCheckout,
+             GetSyncTargetCommit,
+             LoadManifest,
+             UpdateManifest,
+             CopyWorkTree,
+             CreateSyncPatch]
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/tree.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/tree.py
new file mode 100644
index 0000000..01df0b4
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/tree.py
@@ -0,0 +1,382 @@
+import os
+import re
+import subprocess
+
+from .. import vcs
+from ..vcs import bind_to_repo, git, hg
+
+
+def get_unique_name(existing, initial):
+    """Get a name either equal to initial or of the form initial_N, for some
+    integer N, that is not in the set existing.
+
+    :param existing: Set of names that must not be chosen.
+    :param initial: Name, or name prefix, to use."""
+    if initial not in existing:
+        return initial
+    for i in xrange(len(existing) + 1):
+        test = "%s_%s" % (initial, i + 1)
+        if test not in existing:
+            return test
+    assert False
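+
+# For illustration:
+#
+#     get_unique_name({"sync"}, "sync")            == "sync_1"
+#     get_unique_name({"sync", "sync_1"}, "sync")  == "sync_2"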
+
+class NoVCSTree(object):
+    name = "non-vcs"
+
+    def __init__(self, root=None):
+        if root is None:
+            root = os.path.abspath(os.curdir)
+        self.root = root
+
+    @classmethod
+    def is_type(cls, path=None):
+        return True
+
+    @property
+    def is_clean(self):
+        return True
+
+    def add_new(self, prefix=None):
+        pass
+
+    def create_patch(self, patch_name, message):
+        pass
+
+    def update_patch(self, include=None):
+        pass
+
+    def commit_patch(self):
+        pass
+
+
+class HgTree(object):
+    name = "mercurial"
+
+    def __init__(self, root=None):
+        if root is None:
+            root = hg("root").strip()
+        self.root = root
+        self.hg = vcs.bind_to_repo(hg, self.root)
+
+    def __getstate__(self):
+        rv = self.__dict__.copy()
+        del rv['hg']
+        return rv
+
+    def __setstate__(self, state_dict):
+        self.__dict__.update(state_dict)
+        self.hg = vcs.bind_to_repo(vcs.hg, self.root)
+
+    @classmethod
+    def is_type(cls, path=None):
+        kwargs = {"log_error": False}
+        if path is not None:
+            kwargs["repo"] = path
+        try:
+            hg("root", **kwargs)
+        except (subprocess.CalledProcessError, OSError):
+            return False
+        return True
+
+    @property
+    def is_clean(self):
+        return self.hg("status").strip() == ""
+
+    def add_new(self, prefix=None):
+        if prefix is not None:
+            args = ("-I", prefix)
+        else:
+            args = ()
+        self.hg("add", *args)
+
+    def create_patch(self, patch_name, message):
+        try:
+            self.hg("qinit", log_error=False)
+        except subprocess.CalledProcessError:
+            pass
+
+        patch_names = [item.strip() for item in self.hg("qseries").split("\n") if item.strip()]
+
+        suffix = 0
+        test_name = patch_name
+        while test_name in patch_names:
+            suffix += 1
+            test_name = "%s-%i" % (patch_name, suffix)
+
+        self.hg("qnew", test_name, "-X", self.root, "-m", message)
+
+    def update_patch(self, include=None):
+        if include is not None:
+            args = []
+            for item in include:
+                args.extend(["-I", item])
+        else:
+            args = ()
+
+        self.hg("qrefresh", *args)
+        return True
+
+    def commit_patch(self):
+        self.hg("qfinish")
+
+    def contains_commit(self, commit):
+        try:
+            self.hg("identify", "-r", commit.sha1)
+            return True
+        except subprocess.CalledProcessError:
+            return False
+
+
+class GitTree(object):
+    name = "git"
+
+    def __init__(self, root=None):
+        if root is None:
+            root = git("rev-parse", "--show-toplevel").strip()
+        self.root = root
+        self.git = vcs.bind_to_repo(git, self.root)
+        self.message = None
+        self.commit_cls = Commit
+
+    def __getstate__(self):
+        rv = self.__dict__.copy()
+        del rv['git']
+        return rv
+
+    def __setstate__(self, state_dict):
+        self.__dict__.update(state_dict)
+        self.git = vcs.bind_to_repo(vcs.git, self.root)
+
+    @classmethod
+    def is_type(cls, path=None):
+        kwargs = {"log_error": False}
+        if path is not None:
+            kwargs["repo"] = path
+        try:
+            git("rev-parse", "--show-toplevel", **kwargs)
+        except (subprocess.CalledProcessError, OSError):
+            return False
+        return True
+
+    @property
+    def rev(self):
+        """Current HEAD revision"""
+        if vcs.is_git_root(self.root):
+            return self.git("rev-parse", "HEAD").strip()
+        else:
+            return None
+
+    @property
+    def is_clean(self):
+        return self.git("status").strip() == ""
+
+    def add_new(self, prefix=None):
+        """Add files to the staging area.
+
+        :param prefix: None to include all files or a path prefix to
+                       add all files under that path.
+        """
+        if prefix is None:
+            args = ("-a",)
+        else:
+            args = ("--no-ignore-removal", prefix)
+        self.git("add", *args)
+
+    def list_refs(self, ref_filter=None):
+        """Get a list of sha1, name tuples for references in a repository.
+
+        :param ref_filter: Pattern that the reference name must match (from the
+                           end, matching whole /-delimited segments only).
+        """
+        args = []
+        if ref_filter is not None:
+            args.append(ref_filter)
+        data = self.git("show-ref", *args)
+        rv = []
+        for line in data.split("\n"):
+            if not line.strip():
+                continue
+            sha1, ref = line.split()
+            rv.append((sha1, ref))
+        return rv
+
+    def list_remote(self, remote, ref_filter=None):
+        """Return a list of (sha1, name) tupes for references in a remote.
+
+        :param remote: URL of the remote to list.
+        :param ref_filter: Pattern that the reference name must match.
+        """
+        args = []
+        if ref_filter is not None:
+            args.append(ref_filter)
+        data = self.git("ls-remote", remote, *args)
+        rv = []
+        for line in data.split("\n"):
+            if not line.strip():
+                continue
+            sha1, ref = line.split()
+            rv.append((sha1, ref))
+        return rv
+
+    def get_remote_sha1(self, remote, branch):
+        """Return the SHA1 of a particular branch in a remote.
+
+        :param remote: the remote URL
+        :param branch: the branch name"""
+        for sha1, ref in self.list_remote(remote, branch):
+            if ref == "refs/heads/%s" % branch:
+                return self.commit_cls(self, sha1)
+        assert False
+
+    def create_patch(self, patch_name, message):
+        # In git a patch is actually a commit
+        self.message = message
+
+    def update_patch(self, include=None):
+        """Commit the staged changes, or changes to listed files.
+
+        :param include: Either None, to commit staged changes, or a list
+                        of filenames (which must already be in the repo)
+                        to commit
+        """
+        if include is not None:
+            args = tuple(include)
+        else:
+            args = ()
+
+        if self.git("status", "-uno", "-z", *args).strip():
+            self.git("add", *args)
+            return True
+        return False
+
+    def commit_patch(self):
+        assert self.message is not None
+
+        if self.git("diff", "--name-only", "--staged", "-z").strip():
+            self.git("commit", "-m", self.message)
+            return True
+
+        return False
+
+    def init(self):
+        self.git("init")
+        assert vcs.is_git_root(self.root)
+
+    def checkout(self, rev, branch=None, force=False):
+        """Checkout a particular revision, optionally into a named branch.
+
+        :param rev: Revision identifier (e.g. SHA1) to checkout
+        :param branch: Branch name to use
+        :param force: Force-checkout
+        """
+        assert rev is not None
+
+        args = []
+        if branch:
+            branches = [ref[len("refs/heads/"):] for sha1, ref in self.list_refs()
+                        if ref.startswith("refs/heads/")]
+            branch = get_unique_name(branches, branch)
+
+            args += ["-b", branch]
+
+        if force:
+            args.append("-f")
+        args.append(rev)
+        self.git("checkout", *args)
+
+    def update(self, remote, remote_branch, local_branch):
+        """Fetch from the remote and checkout into a local branch.
+
+        :param remote: URL to the remote repository
+        :param remote_branch: Branch on the remote repository to check out
+        :param local_branch: Local branch name to check out into
+        """
+        if not vcs.is_git_root(self.root):
+            self.init()
+        self.git("clean", "-xdf")
+        self.git("fetch", remote, "%s:%s" % (remote_branch, local_branch))
+        self.checkout(local_branch)
+        self.git("submodule", "update", "--init", "--recursive")
+
+    def clean(self):
+        self.git("checkout", self.rev)
+        self.git("branch", "-D", self.local_branch)
+
+    def paths(self):
+        """List paths in the tree"""
+        repo_paths = [self.root] + [os.path.join(self.root, path)
+                                    for path in self.submodules()]
+
+        rv = []
+
+        for repo_path in repo_paths:
+            paths = vcs.git("ls-tree", "-r", "--name-only", "HEAD", repo=repo_path).split("\n")
+            rv.extend(os.path.relpath(os.path.join(repo_path, item), self.root) for item in paths
+                      if item.strip())
+        return rv
+
+    def submodules(self):
+        """List submodule directories"""
+        output = self.git("submodule", "status", "--recursive")
+        rv = []
+        for line in output.split("\n"):
+            line = line.strip()
+            if not line:
+                continue
+            parts = line.split(" ")
+            rv.append(parts[1])
+        return rv
+
+    def contains_commit(self, commit):
+        try:
+            self.git("rev-parse", "--verify", commit.sha1)
+            return True
+        except subprocess.CalledProcessError:
+            return False
+
+
+class CommitMessage(object):
+    def __init__(self, text):
+        self.text = text
+        self._parse_message()
+
+    def __str__(self):
+        return self.text
+
+    def _parse_message(self):
+        lines = self.text.splitlines()
+        self.full_summary = lines[0]
+        self.body = "\n".join(lines[1:])
+
+
+class Commit(object):
+    msg_cls = CommitMessage
+
+    _sha1_re = re.compile("^[0-9a-f]{40}$")
+
+    def __init__(self, tree, sha1):
+        """Object representing a commit in a specific GitTree.
+
+        :param tree: GitTree to which this commit belongs.
+        :param sha1: Full sha1 string for the commit
+        """
+        assert self._sha1_re.match(sha1)
+
+        self.tree = tree
+        self.git = tree.git
+        self.sha1 = sha1
+        self.author, self.email, self.message = self._get_meta()
+
+    def __getstate__(self):
+        rv = self.__dict__.copy()
+        del rv['git']
+        return rv
+
+    def __setstate__(self, state_dict):
+        self.__dict__.update(state_dict)
+        self.git = self.tree.git
+
+    def _get_meta(self):
+        author, email, message = self.git("show", "-s", "--format=format:%an\n%ae\n%B", self.sha1).split("\n", 2)
+        return author, email, self.msg_cls(message)
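+
+
+# A minimal sketch (not part of this module) of reading commit metadata;
+# it assumes the current directory is inside a git checkout:
+#
+#     tree = GitTree()
+#     head = Commit(tree, tree.rev)   # requires a full 40-character sha1
+#     head.author, head.email, head.message.full_summary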
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/update.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/update.py
new file mode 100644
index 0000000..3d7d772
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/update/update.py
@@ -0,0 +1,159 @@
+import os
+import sys
+
+from metadata import MetadataUpdateRunner
+from sync import SyncFromUpstreamRunner
+from tree import GitTree, HgTree, NoVCSTree
+
+from .. import environment as env
+from base import Step, StepRunner, exit_clean, exit_unclean
+from state import State
+
+def setup_paths(sync_path):
+    sys.path.insert(0, os.path.abspath(sync_path))
+    try:
+        # Imported only for its side effect of adding the vendored
+        # dependencies to sys.path.
+        from tools import localpaths
+    except ImportError:
+        from wpt_tools import localpaths
+
+class LoadConfig(Step):
+    """Step for loading configuration from the ini file and kwargs."""
+
+    provides = ["sync", "paths", "metadata_path", "tests_path"]
+
+    def create(self, state):
+        state.sync = {"remote_url": state.kwargs["remote_url"],
+                      "branch": state.kwargs["branch"],
+                      "path": state.kwargs["sync_path"]}
+
+        state.paths = state.kwargs["test_paths"]
+        state.tests_path = state.paths["/"]["tests_path"]
+        state.metadata_path = state.paths["/"]["metadata_path"]
+
+        assert os.path.isabs(state.tests_path)
+
+
+class LoadTrees(Step):
+    """Step for creating a Tree for the local copy and a GitTree for the
+    upstream sync."""
+
+    provides = ["local_tree", "sync_tree"]
+
+    def create(self, state):
+        if os.path.exists(state.sync["path"]):
+            sync_tree = GitTree(root=state.sync["path"])
+        else:
+            sync_tree = None
+
+        if GitTree.is_type():
+            local_tree = GitTree()
+        elif HgTree.is_type():
+            local_tree = HgTree()
+        else:
+            local_tree = NoVCSTree()
+
+        state.update({"local_tree": local_tree,
+                      "sync_tree": sync_tree})
+
+
+class SyncFromUpstream(Step):
+    """Step that synchronises a local copy of the code with upstream."""
+
+    def create(self, state):
+        if not state.kwargs["sync"]:
+            return
+
+        if not state.sync_tree:
+            os.mkdir(state.sync["path"])
+            state.sync_tree = GitTree(root=state.sync["path"])
+
+        kwargs = state.kwargs
+        with state.push(["sync", "paths", "metadata_path", "tests_path", "local_tree",
+                         "sync_tree"]):
+            state.target_rev = kwargs["rev"]
+            state.patch = kwargs["patch"]
+            state.suite_name = kwargs["suite_name"]
+            state.path_excludes = kwargs["exclude"]
+            state.path_includes = kwargs["include"]
+            runner = SyncFromUpstreamRunner(self.logger, state)
+            runner.run()
+
+
+class UpdateMetadata(Step):
+    """Update the expectation metadata from a set of run logs"""
+
+    def create(self, state):
+        if not state.kwargs["run_log"]:
+            return
+
+        kwargs = state.kwargs
+        with state.push(["local_tree", "sync_tree", "paths", "serve_root"]):
+            state.run_log = kwargs["run_log"]
+            state.ignore_existing = kwargs["ignore_existing"]
+            state.patch = kwargs["patch"]
+            state.suite_name = kwargs["suite_name"]
+            state.product = kwargs["product"]
+            state.config = kwargs["config"]
+            runner = MetadataUpdateRunner(self.logger, state)
+            runner.run()
+
+
+class UpdateRunner(StepRunner):
+    """Runner for doing an overall update."""
+    steps = [LoadConfig,
+             LoadTrees,
+             SyncFromUpstream,
+             UpdateMetadata]
+
+
+class WPTUpdate(object):
+    def __init__(self, logger, runner_cls=UpdateRunner, **kwargs):
+        """Object that controls the running of a whole wptupdate.
+
+        :param runner_cls: Runner subclass holding the overall list of
+                           steps to run.
+        :param kwargs: Command line arguments
+        """
+        self.runner_cls = runner_cls
+        self.serve_root = kwargs["test_paths"]["/"]["tests_path"]
+
+        if not kwargs["sync"]:
+            setup_paths(self.serve_root)
+        else:
+            if os.path.exists(kwargs["sync_path"]):
+                # If the sync path doesn't exist we defer this until it does
+                setup_paths(kwargs["sync_path"])
+
+        self.state = State(logger)
+        self.kwargs = kwargs
+        self.logger = logger
+
+    def run(self, **kwargs):
+        if self.kwargs["abort"]:
+            self.abort()
+            return exit_clean
+
+        if not self.kwargs["continue"] and not self.state.is_empty():
+            self.logger.error("Found existing state. Run with --continue to resume or --abort to clear state")
+            return exit_unclean
+
+        if self.kwargs["continue"]:
+            if self.state.is_empty():
+                self.logger.error("No sync in progress?")
+                return exit_clean
+
+            self.kwargs = self.state.kwargs
+        else:
+            self.state.kwargs = self.kwargs
+
+        self.state.serve_root = self.serve_root
+
+        update_runner = self.runner_cls(self.logger, self.state)
+        rv = update_runner.run()
+        if rv in (exit_clean, None):
+            self.state.clear()
+
+        return rv
+
+    def abort(self):
+        self.state.clear()
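+
+
+# A minimal sketch (kwargs abbreviated and hypothetical; see
+# wptcommandline for the full set) of driving an update programmatically,
+# as run_update() in __init__.py does:
+#
+#     kwargs = {"sync": False, "continue": False, "abort": False,
+#               "run_log": run_logs, "patch": True,
+#               "test_paths": {"/": {"tests_path": tests_path,
+#                                    "metadata_path": metadata_path}}}
+#     WPTUpdate(logger, **kwargs).run()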
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/vcs.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/vcs.py
new file mode 100644
index 0000000..16d53af
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/vcs.py
@@ -0,0 +1,49 @@
+import subprocess
+from functools import partial
+
+from mozlog import get_default_logger
+
+logger = None
+
+def vcs(bin_name):
+    def inner(command, *args, **kwargs):
+        global logger
+
+        if logger is None:
+            logger = get_default_logger("vcs")
+
+        repo = kwargs.pop("repo", None)
+        log_error = kwargs.pop("log_error", True)
+        if kwargs:
+            raise TypeError(kwargs)
+
+        args = list(args)
+
+        proc_kwargs = {}
+        if repo is not None:
+            proc_kwargs["cwd"] = repo
+
+        command_line = [bin_name, command] + args
+        logger.debug(" ".join(command_line))
+        try:
+            return subprocess.check_output(command_line, stderr=subprocess.STDOUT, **proc_kwargs)
+        except subprocess.CalledProcessError as e:
+            if log_error:
+                logger.error(e.output)
+            raise
+    return inner
+
+git = vcs("git")
+hg = vcs("hg")
+
+
+def bind_to_repo(vcs_func, repo):
+    return partial(vcs_func, repo=repo)
+
+
+def is_git_root(path):
+    try:
+        rv = git("rev-parse", "--show-cdup", repo=path)
+    except subprocess.CalledProcessError:
+        return False
+    return rv == "\n"
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/webdriver_server.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/webdriver_server.py
new file mode 100644
index 0000000..49d9c73
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/webdriver_server.py
@@ -0,0 +1,233 @@
+import abc
+import errno
+import os
+import platform
+import socket
+import time
+import traceback
+
+import mozprocess
+
+
+__all__ = ["SeleniumServer", "ChromeDriverServer",
+           "GeckoDriverServer", "ServoDriverServer",
+           "WebDriverServer"]
+
+
+class WebDriverServer(object):
+    __metaclass__ = abc.ABCMeta
+
+    default_base_path = "/"
+    _used_ports = set()
+
+    def __init__(self, logger, binary, host="127.0.0.1", port=None,
+                 base_path="", env=None, args=None):
+        if binary is None:
+            raise ValueError("WebDriver server binary must be given "
+                             "to --webdriver-binary argument")
+
+        self.logger = logger
+        self.binary = binary
+        self.host = host
+        if base_path == "":
+            self.base_path = self.default_base_path
+        else:
+            self.base_path = base_path
+        self.env = os.environ.copy() if env is None else env
+
+        self._port = port
+        self._cmd = None
+        self._args = args if args is not None else []
+        self._proc = None
+
+    @abc.abstractmethod
+    def make_command(self):
+        """Returns the full command for starting the server process as a list."""
+
+    def start(self, block=False):
+        try:
+            self._run(block)
+        except KeyboardInterrupt:
+            self.stop()
+
+    def _run(self, block):
+        self._cmd = self.make_command()
+        self._proc = mozprocess.ProcessHandler(
+            self._cmd,
+            processOutputLine=self.on_output,
+            env=self.env,
+            storeOutput=False)
+
+        try:
+            self._proc.run()
+        except OSError as e:
+            if e.errno == errno.ENOENT:
+                raise IOError(
+                    "WebDriver HTTP server executable not found: %s" % self.binary)
+            raise
+
+        self.logger.debug(
+            "Waiting for server to become accessible: %s" % self.url)
+        try:
+            wait_for_service((self.host, self.port))
+        except Exception:
+            self.logger.error(
+                "WebDriver HTTP server was not accessible "
+                "within the timeout:\n%s" % traceback.format_exc())
+            raise
+
+        if block:
+            self._proc.wait()
+
+    def stop(self, force=False):
+        if self.is_alive:
+            return self._proc.kill()
+        return not self.is_alive
+
+    @property
+    def is_alive(self):
+        return hasattr(self._proc, "proc") and self._proc.poll() is None
+
+    def on_output(self, line):
+        self.logger.process_output(self.pid,
+                                   line.decode("utf8", "replace"),
+                                   command=" ".join(self._cmd))
+
+    @property
+    def pid(self):
+        if self._proc is not None:
+            return self._proc.pid
+
+    @property
+    def url(self):
+        return "http://%s:%i%s" % (self.host, self.port, self.base_path)
+
+    @property
+    def port(self):
+        if self._port is None:
+            self._port = self._find_next_free_port()
+        return self._port
+
+    @staticmethod
+    def _find_next_free_port():
+        port = get_free_port(4444, exclude=WebDriverServer._used_ports)
+        WebDriverServer._used_ports.add(port)
+        return port
+
+
+class SeleniumServer(WebDriverServer):
+    default_base_path = "/wd/hub"
+
+    def make_command(self):
+        return ["java", "-jar", self.binary, "-port", str(self.port)] + self._args
+
+
+class ChromeDriverServer(WebDriverServer):
+    default_base_path = "/wd/hub"
+
+    def __init__(self, logger, binary="chromedriver", port=None,
+                 base_path="", args=None):
+        WebDriverServer.__init__(
+            self, logger, binary, port=port, base_path=base_path, args=args)
+
+    def make_command(self):
+        return [self.binary,
+                cmd_arg("port", str(self.port)),
+                cmd_arg("url-base", self.base_path) if self.base_path else ""] + self._args
+
+
+class EdgeDriverServer(WebDriverServer):
+    def __init__(self, logger, binary="MicrosoftWebDriver.exe", port=None,
+                 base_path="", host="localhost", args=None):
+        WebDriverServer.__init__(
+            self, logger, binary, host=host, port=port, args=args)
+
+    def make_command(self):
+        return [self.binary,
+                "--port=%s" % str(self.port)] + self._args
+
+
+class GeckoDriverServer(WebDriverServer):
+    def __init__(self, logger, marionette_port=2828, binary="geckodriver",
+                 host="127.0.0.1", port=None, args=None):
+        env = os.environ.copy()
+        env["RUST_BACKTRACE"] = "1"
+        WebDriverServer.__init__(self, logger, binary, host=host, port=port, env=env, args=args)
+        self.marionette_port = marionette_port
+
+    def make_command(self):
+        return [self.binary,
+                "--marionette-port", str(self.marionette_port),
+                "--host", self.host,
+                "--port", str(self.port)] + self._args
+
+
+class ServoDriverServer(WebDriverServer):
+    def __init__(self, logger, binary="servo", binary_args=None, host="127.0.0.1", port=None):
+        env = os.environ.copy()
+        env["RUST_BACKTRACE"] = "1"
+        WebDriverServer.__init__(self, logger, binary, host=host, port=port, env=env)
+        self.binary_args = binary_args
+
+    def make_command(self):
+        command = [self.binary,
+                   "--webdriver", str(self.port),
+                   "--hard-fail",
+                   "--headless"] + self._args
+        if self.binary_args:
+            command += self.binary_args
+        return command
+
+
+def cmd_arg(name, value=None):
+    prefix = "-" if platform.system() == "Windows" else "--"
+    rv = prefix + name
+    if value is not None:
+        rv += "=" + value
+    return rv
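+
+# For illustration, cmd_arg adapts switch syntax to the host platform:
+#
+#     cmd_arg("url-base", "/wd/hub")
+#     # "--url-base=/wd/hub" everywhere except Windows, where it becomes
+#     # "-url-base=/wd/hub"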
+
+
+def get_free_port(start_port, exclude=None):
+    """Get the first port number after start_port (inclusive) that is
+    not currently bound.
+
+    :param start_port: Integer port number at which to start testing.
+    :param exclude: Set of port numbers to skip"""
+    port = start_port
+    while True:
+        if exclude and port in exclude:
+            port += 1
+            continue
+        s = socket.socket()
+        try:
+            s.bind(("127.0.0.1", port))
+        except socket.error:
+            port += 1
+        else:
+            return port
+        finally:
+            s.close()
+
+
+def wait_for_service(addr, timeout=15):
+    """Waits until network service given as a tuple of (host, port) becomes
+    available or the `timeout` duration is reached, at which point
+    ``socket.error`` is raised."""
+    end = time.time() + timeout
+    while end > time.time():
+        so = socket.socket()
+        try:
+            so.connect(addr)
+        except socket.timeout:
+            pass
+        except socket.error as e:
+            if e[0] != errno.ECONNREFUSED:
+                raise
+        else:
+            return True
+        finally:
+            so.close()
+        time.sleep(0.5)
+    raise socket.error("Service is unavailable: %s:%i" % addr)
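+
+
+# A hedged sketch (binary path hypothetical) of driving one of these
+# servers:
+#
+#     server = ChromeDriverServer(logger, binary="/path/to/chromedriver")
+#     server.start()   # returns once the port is accepting connections
+#     ...              # point a WebDriver client at server.url
+#     server.stop()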
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptcommandline.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptcommandline.py
new file mode 100644
index 0000000..b3dc8b9
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptcommandline.py
@@ -0,0 +1,496 @@
+import argparse
+import ast
+import os
+import sys
+from collections import OrderedDict
+from distutils.spawn import find_executable
+
+import config
+import wpttest
+import formatters
+
+
+def abs_path(path):
+    return os.path.abspath(os.path.expanduser(path))
+
+
+def url_or_path(path):
+    import urlparse
+
+    parsed = urlparse.urlparse(path)
+    # Schemes of length <= 2 are assumed to be Windows drive letters
+    # (e.g. "c:") rather than URL schemes, so such values are treated
+    # as filesystem paths.
+    if len(parsed.scheme) > 2:
+        return path
+    else:
+        return abs_path(path)
+
+
+def require_arg(kwargs, name, value_func=None):
+    if value_func is None:
+        value_func = lambda x: x is not None
+
+    if name not in kwargs or not value_func(kwargs[name]):
+        print >> sys.stderr, "Missing required argument %s" % name
+        sys.exit(1)
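+
+# For illustration (the argument names are hypothetical):
+#
+#     require_arg(kwargs, "binary")                       # must be non-None
+#     require_arg(kwargs, "processes", lambda x: x > 0)   # custom check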
+
+
+def create_parser(product_choices=None):
+    from mozlog import commandline
+
+    import products
+
+    if product_choices is None:
+        config_data = config.load()
+        product_choices = products.products_enabled(config_data)
+
+    parser = argparse.ArgumentParser(description="""Runner for web-platform-tests tests.""",
+                                     usage="""%(prog)s [OPTION]... [TEST]...
+
+TEST is either the full path to a test file to run, or the URL of a test excluding
+scheme host and port.""")
+    parser.add_argument("--manifest-update", action="store_true", default=None,
+                        help="Regenerate the test manifest.")
+    parser.add_argument("--no-manifest-update", action="store_false", dest="manifest_update",
+                        help="Prevent regeneration of the test manifest.")
+
+    parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
+                        help="Multiplier relative to standard test timeout to use")
+    parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
+                        help="Split run into groups by directories. With a parameter,"
+                        "limit the depth of splits e.g. --run-by-dir=1 to split by top-level"
+                        "directory")
+    parser.add_argument("--processes", action="store", type=int, default=None,
+                        help="Number of simultaneous processes to use")
+
+    parser.add_argument("--no-capture-stdio", action="store_true", default=False,
+                        help="Don't capture stdio and write to logging")
+
+    mode_group = parser.add_argument_group("Mode")
+    mode_group.add_argument("--list-test-groups", action="store_true",
+                            default=False,
+                            help="List the top level directories containing tests that will run.")
+    mode_group.add_argument("--list-disabled", action="store_true",
+                            default=False,
+                            help="List the tests that are disabled on the current platform")
+    mode_group.add_argument("--list-tests", action="store_true",
+                            default=False,
+                            help="List all tests that will run")
+
+    test_selection_group = parser.add_argument_group("Test Selection")
+    test_selection_group.add_argument("--test-types", action="store",
+                                      nargs="*", default=wpttest.enabled_tests,
+                                      choices=wpttest.enabled_tests,
+                                      help="Test types to run")
+    test_selection_group.add_argument("--include", action="append",
+                                      help="URL prefix to include")
+    test_selection_group.add_argument("--exclude", action="append",
+                                      help="URL prefix to exclude")
+    test_selection_group.add_argument("--include-manifest", type=abs_path,
+                                      help="Path to manifest listing tests to include")
+    test_selection_group.add_argument("--tag", action="append", dest="tags",
+                                      help="Labels applied to tests to include in the run. Labels starting dir: are equivalent to top-level directories.")
+
+    debugging_group = parser.add_argument_group("Debugging")
+    debugging_group.add_argument('--debugger', const="__default__", nargs="?",
+                                 help="run under a debugger, e.g. gdb or valgrind")
+    debugging_group.add_argument('--debugger-args', help="arguments to the debugger")
+    debugging_group.add_argument("--repeat", action="store", type=int, default=1,
+                                 help="Number of times to run the tests")
+    debugging_group.add_argument("--repeat-until-unexpected", action="store_true", default=None,
+                                 help="Run tests in a loop until one returns an unexpected result")
+    debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
+                                 help="Halt the test runner after each test (this happens by default if only a single test is run)")
+    debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test", action="store_false",
+                                 help="Don't halt the test runner irrespective of the number of tests run")
+
+    debugging_group.add_argument('--pause-on-unexpected', action="store_true",
+                                 help="Halt the test runner when an unexpected result is encountered")
+    debugging_group.add_argument('--no-restart-on-unexpected', dest="restart_on_unexpected",
+                                 default=True, action="store_false",
+                                 help="Don't restart on an unexpected result")
+
+    debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
+                                 help="Path or url to symbols file used to analyse crash minidumps.")
+    debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
+                                 help="Path to stackwalker program used to analyse minidumps.")
+
+    debugging_group.add_argument("--pdb", action="store_true",
+                                 help="Drop into pdb on python exception")
+
+    config_group = parser.add_argument_group("Configuration")
+    config_group.add_argument("--binary", action="store",
+                              type=abs_path, help="Binary to run tests against")
+    config_group.add_argument('--binary-arg',
+                              default=[], action="append", dest="binary_args",
+                              help="Extra argument for the binary")
+    config_group.add_argument("--webdriver-binary", action="store", metavar="BINARY",
+                              type=abs_path, help="WebDriver server binary to use")
+    config_group.add_argument('--webdriver-arg',
+                              default=[], action="append", dest="webdriver_args",
+                              help="Extra argument for the WebDriver binary")
+
+    config_group.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
+                              help="Path to root directory containing test metadata"),
+    config_group.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
+                              help="Path to root directory containing test files"),
+    config_group.add_argument("--run-info", action="store", type=abs_path,
+                              help="Path to directory containing extra json files to add to run info")
+    config_group.add_argument("--product", action="store", choices=product_choices,
+                              default=None, help="Browser against which to run tests")
+    config_group.add_argument("--config", action="store", type=abs_path, dest="config",
+                              help="Path to config file")
+
+    build_type = parser.add_mutually_exclusive_group()
+    build_type.add_argument("--debug-build", dest="debug", action="store_true",
+                            default=None,
+                            help="Build is a debug build (overrides any mozinfo file)")
+    build_type.add_argument("--release-build", dest="debug", action="store_false",
+                            default=None,
+                            help="Build is a release (overrides any mozinfo file)")
+
+
+    chunking_group = parser.add_argument_group("Test Chunking")
+    chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
+                                help="Total number of chunks to use")
+    chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
+                                help="Chunk number to run")
+    chunking_group.add_argument("--chunk-type", action="store", choices=["none", "equal_time", "hash", "dir_hash"],
+                                default=None, help="Chunking type to use")
+
+    ssl_group = parser.add_argument_group("SSL/TLS")
+    ssl_group.add_argument("--ssl-type", action="store", default=None,
+                        choices=["openssl", "pregenerated", "none"],
+                        help="Type of ssl support to enable (running without ssl may lead to spurious errors)")
+
+    ssl_group.add_argument("--openssl-binary", action="store",
+                        help="Path to openssl binary", default="openssl")
+    ssl_group.add_argument("--certutil-binary", action="store",
+                        help="Path to certutil binary for use with Firefox + ssl")
+
+    ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
+                        help="Path to ca certificate when using pregenerated ssl certificates")
+    ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
+                        help="Path to host private key when using pregenerated ssl certificates")
+    ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
+                        help="Path to host certificate when using pregenerated ssl certificates")
+
+    gecko_group = parser.add_argument_group("Gecko-specific")
+    gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store", type=abs_path,
+                             help="Path to the folder containing browser prefs")
+    gecko_group.add_argument("--disable-e10s", dest="gecko_e10s", action="store_false", default=True,
+                             help="Run tests without electrolysis preferences")
+    gecko_group.add_argument("--stackfix-dir", dest="stackfix_dir", action="store",
+                             help="Path to directory containing assertion stack fixing scripts")
+    gecko_group.add_argument("--setpref", dest="extra_prefs", action='append',
+                             default=[], metavar="PREF=VALUE",
+                             help="Defines an extra user preference (overrides those in prefs_root)")
+    gecko_group.add_argument("--leak-check", dest="leak_check", action="store_true",
+                             help="Enable leak checking")
+    gecko_group.add_argument("--stylo-threads", action="store", type=int, default=1,
+                             help="Number of parallel threads to use for stylo")
+    gecko_group.add_argument("--reftest-internal", dest="reftest_internal", action="store_true",
+                             default=None, help="Enable reftest runner implemented inside Marionette")
+    gecko_group.add_argument("--reftest-external", dest="reftest_internal", action="store_false",
+                             help="Disable reftest runner implemented inside Marionette")
+    gecko_group.add_argument("--reftest-screenshot", dest="reftest_screenshot", action="store",
+                             choices=["always", "fail", "unexpected"], default="unexpected",
+                             help="With --reftest-internal, when to take a screenshot")
+
+    servo_group = parser.add_argument_group("Servo-specific")
+    servo_group.add_argument("--user-stylesheet",
+                             default=[], action="append", dest="user_stylesheets",
+                             help="Inject a user CSS stylesheet into every test.")
+
+    sauce_group = parser.add_argument_group("Sauce Labs-specific")
+    sauce_group.add_argument("--sauce-browser", dest="sauce_browser",
+                             help="Sauce Labs browser name")
+    sauce_group.add_argument("--sauce-platform", dest="sauce_platform",
+                             help="Sauce Labs OS platform")
+    sauce_group.add_argument("--sauce-version", dest="sauce_version",
+                             help="Sauce Labs browser version")
+    sauce_group.add_argument("--sauce-build", dest="sauce_build",
+                             help="Sauce Labs build identifier")
+    sauce_group.add_argument("--sauce-tags", dest="sauce_tags", nargs="*",
+                             help="Sauce Labs identifying tag", default=[])
+    sauce_group.add_argument("--sauce-tunnel-id", dest="sauce_tunnel_id",
+                             help="Sauce Connect tunnel identifier")
+    sauce_group.add_argument("--sauce-user", dest="sauce_user",
+                             help="Sauce Labs user name")
+    sauce_group.add_argument("--sauce-key", dest="sauce_key",
+                             default=os.environ.get("SAUCE_ACCESS_KEY"),
+                             help="Sauce Labs access key")
+    sauce_group.add_argument("--sauce-connect-binary",
+                             dest="sauce_connect_binary",
+                             help="Path to Sauce Connect binary")
+
+    parser.add_argument("test_list", nargs="*",
+                        help="List of URLs for tests to run, or paths including tests to run. "
+                             "(equivalent to --include)")
+
+    commandline.log_formatters["wptreport"] = (formatters.WptreportFormatter, "wptreport format")
+
+    commandline.add_logging_group(parser)
+    return parser
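+
+# Example usage (illustrative sketch), assuming mozlog and a wptrunner config
+# file are available on this system:
+#
+#     parser = create_parser()
+#     kwargs = vars(parser.parse_args(["--product", "firefox"]))
+#     kwargs = check_args(kwargs)  # fill in defaults from the config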
+
+
+def set_from_config(kwargs):
+    if kwargs["config"] is None:
+        config_path = config.path()
+    else:
+        config_path = kwargs["config"]
+
+    kwargs["config_path"] = config_path
+
+    kwargs["config"] = config.read(kwargs["config_path"])
+
+    keys = {"paths": [("prefs", "prefs_root", True),
+                      ("run_info", "run_info", True)],
+            "web-platform-tests": [("remote_url", "remote_url", False),
+                                   ("branch", "branch", False),
+                                   ("sync_path", "sync_path", True)],
+            "SSL": [("openssl_binary", "openssl_binary", True),
+                    ("certutil_binary", "certutil_binary", True),
+                    ("ca_cert_path", "ca_cert_path", True),
+                    ("host_cert_path", "host_cert_path", True),
+                    ("host_key_path", "host_key_path", True)]}
+
+    for section, values in keys.iteritems():
+        for config_value, kw_value, is_path in values:
+            if kw_value in kwargs and kwargs[kw_value] is None:
+                if not is_path:
+                    new_value = kwargs["config"].get(section, config.ConfigDict({})).get(config_value)
+                else:
+                    new_value = kwargs["config"].get(section, config.ConfigDict({})).get_path(config_value)
+                kwargs[kw_value] = new_value
+
+    kwargs["test_paths"] = get_test_paths(kwargs["config"])
+
+    if kwargs["tests_root"]:
+        if "/" not in kwargs["test_paths"]:
+            kwargs["test_paths"]["/"] = {}
+        kwargs["test_paths"]["/"]["tests_path"] = kwargs["tests_root"]
+
+    if kwargs["metadata_root"]:
+        if "/" not in kwargs["test_paths"]:
+            kwargs["test_paths"]["/"] = {}
+        kwargs["test_paths"]["/"]["metadata_path"] = kwargs["metadata_root"]
+
+    kwargs["suite_name"] = kwargs["config"].get("web-platform-tests", {}).get("name", "web-platform-tests")
+
+
+def get_test_paths(config):
+    # Set up test_paths
+    test_paths = OrderedDict()
+
+    for section in config.iterkeys():
+        if section.startswith("manifest:"):
+            manifest_opts = config.get(section)
+            url_base = manifest_opts.get("url_base", "/")
+            test_paths[url_base] = {
+                "tests_path": manifest_opts.get_path("tests"),
+                "metadata_path": manifest_opts.get_path("metadata")}
+
+    return test_paths
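+
+# Example (illustrative sketch): a config file containing
+#
+#     [manifest:upstream]
+#     tests = ~/web-platform-tests
+#     metadata = ~/wpt-metadata
+#     url_base = /
+#
+# produces {"/": {"tests_path": ..., "metadata_path": ...}}, with both paths
+# expanded to absolute paths by get_path.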
+
+
+def exe_path(name):
+    if name is None:
+        return
+
+    path = find_executable(name)
+    if path and os.access(path, os.X_OK):
+        return path
+
+
+def check_args(kwargs):
+    set_from_config(kwargs)
+
+    for test_paths in kwargs["test_paths"].itervalues():
+        if not ("tests_path" in test_paths and
+                "metadata_path" in test_paths):
+            print "Fatal: must specify both a test path and metadata path"
+            sys.exit(1)
+        for key, path in test_paths.iteritems():
+            name = key.split("_", 1)[0]
+
+            if not os.path.exists(path):
+                print "Fatal: %s path %s does not exist" % (name, path)
+                sys.exit(1)
+
+            if not os.path.isdir(path):
+                print "Fatal: %s path %s is not a directory" % (name, path)
+                sys.exit(1)
+
+    if kwargs["product"] is None:
+        kwargs["product"] = "firefox"
+
+    if "sauce" in kwargs["product"]:
+        kwargs["pause_after_test"] = False
+
+    if kwargs["test_list"]:
+        if kwargs["include"] is not None:
+            kwargs["include"].extend(kwargs["test_list"])
+        else:
+            kwargs["include"] = kwargs["test_list"]
+
+    if kwargs["run_info"] is None:
+        kwargs["run_info"] = kwargs["config_path"]
+
+    if kwargs["this_chunk"] > 1:
+        require_arg(kwargs, "total_chunks", lambda x: x >= kwargs["this_chunk"])
+
+    if kwargs["chunk_type"] is None:
+        if kwargs["total_chunks"] > 1:
+            kwargs["chunk_type"] = "dir_hash"
+        else:
+            kwargs["chunk_type"] = "none"
+
+    if kwargs["processes"] is None:
+        kwargs["processes"] = 1
+
+    if kwargs["debugger"] is not None:
+        import mozdebug
+        if kwargs["debugger"] == "__default__":
+            kwargs["debugger"] = mozdebug.get_default_debugger_name()
+        debug_info = mozdebug.get_debugger_info(kwargs["debugger"],
+                                                kwargs["debugger_args"])
+        if debug_info and debug_info.interactive:
+            if kwargs["processes"] != 1:
+                kwargs["processes"] = 1
+            kwargs["no_capture_stdio"] = True
+        kwargs["debug_info"] = debug_info
+    else:
+        kwargs["debug_info"] = None
+
+    if kwargs["binary"] is not None:
+        if not os.path.exists(kwargs["binary"]):
+            print >> sys.stderr, "Binary path %s does not exist" % kwargs["binary"]
+            sys.exit(1)
+
+    if kwargs["ssl_type"] is None:
+        if None not in (kwargs["ca_cert_path"], kwargs["host_cert_path"], kwargs["host_key_path"]):
+            kwargs["ssl_type"] = "pregenerated"
+        elif exe_path(kwargs["openssl_binary"]) is not None:
+            kwargs["ssl_type"] = "openssl"
+        else:
+            kwargs["ssl_type"] = "none"
+
+    if kwargs["ssl_type"] == "pregenerated":
+        require_arg(kwargs, "ca_cert_path", lambda x:os.path.exists(x))
+        require_arg(kwargs, "host_cert_path", lambda x:os.path.exists(x))
+        require_arg(kwargs, "host_key_path", lambda x:os.path.exists(x))
+
+    elif kwargs["ssl_type"] == "openssl":
+        path = exe_path(kwargs["openssl_binary"])
+        if path is None:
+            print >> sys.stderr, "openssl-binary argument missing or not a valid executable"
+            sys.exit(1)
+        kwargs["openssl_binary"] = path
+
+    if kwargs["ssl_type"] != "none" and kwargs["product"] == "firefox":
+        path = exe_path(kwargs["certutil_binary"])
+        if path is None:
+            print >> sys.stderr, "certutil-binary argument missing or not a valid executable"
+            sys.exit(1)
+        kwargs["certutil_binary"] = path
+
+    if kwargs['extra_prefs']:
+        missing = any('=' not in prefarg for prefarg in kwargs['extra_prefs'])
+        if missing:
+            print >> sys.stderr, "Preferences via --setpref must be in key=value format"
+            sys.exit(1)
+        kwargs['extra_prefs'] = [tuple(prefarg.split('=', 1)) for prefarg in
+                                 kwargs['extra_prefs']]
+
+    if kwargs["reftest_internal"] is None:
+        # Default to the internal reftest implementation on Linux and OSX
+        kwargs["reftest_internal"] = sys.platform.startswith("linux") or sys.platform.startswith("darwin")
+
+    return kwargs
+
+
+def check_args_update(kwargs):
+    set_from_config(kwargs)
+
+    if kwargs["product"] is None:
+        kwargs["product"] = "firefox"
+    if kwargs["patch"] is None:
+        kwargs["patch"] = kwargs["sync"]
+
+    for item in kwargs["run_log"]:
+        if os.path.isdir(item):
+            print >> sys.stderr, "Log file %s is a directory" % item
+            sys.exit(1)
+
+    return kwargs
+
+
+def create_parser_update(product_choices=None):
+    from mozlog.structured import commandline
+
+    import products
+
+    if product_choices is None:
+        config_data = config.load()
+        product_choices = products.products_enabled(config_data)
+
+    parser = argparse.ArgumentParser("web-platform-tests-update",
+                                     description="Update script for web-platform-tests tests.")
+    parser.add_argument("--product", action="store", choices=product_choices,
+                        default=None, help="Browser for which metadata is being updated")
+    parser.add_argument("--config", action="store", type=abs_path, help="Path to config file")
+    parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
+                        help="Path to the folder containing test metadata"),
+    parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
+                        help="Path to web-platform-tests"),
+    parser.add_argument("--sync-path", action="store", type=abs_path,
+                        help="Path to store git checkout of web-platform-tests during update"),
+    parser.add_argument("--remote_url", action="store",
+                        help="URL of web-platfrom-tests repository to sync against"),
+    parser.add_argument("--branch", action="store", type=abs_path,
+                        help="Remote branch to sync against")
+    parser.add_argument("--rev", action="store", help="Revision to sync to")
+    parser.add_argument("--patch", action="store_true", dest="patch", default=None,
+                        help="Create a VCS commit containing the changes.")
+    parser.add_argument("--no-patch", action="store_false", dest="patch",
+                        help="Don't create a VCS commit containing the changes.")
+    parser.add_argument("--sync", dest="sync", action="store_true", default=False,
+                        help="Sync the tests with the latest from upstream (implies --patch)")
+    parser.add_argument("--ignore-existing", action="store_true", help="When updating test results only consider results from the logfiles provided, not existing expectations.")
+    parser.add_argument("--continue", action="store_true", help="Continue a previously started run of the update script")
+    parser.add_argument("--abort", action="store_true", help="Clear state from a previous incomplete run of the update script")
+    parser.add_argument("--exclude", action="store", nargs="*",
+                        help="List of glob-style paths to exclude when syncing tests")
+    parser.add_argument("--include", action="store", nargs="*",
+                        help="List of glob-style paths to include which would otherwise be excluded when syncing tests")
+    # Should make this required iff run=logfile
+    parser.add_argument("run_log", nargs="*", type=abs_path,
+                        help="Log file from run of tests")
+    commandline.add_logging_group(parser)
+    return parser
+
+
+def create_parser_reduce(product_choices=None):
+    parser = create_parser(product_choices)
+    parser.add_argument("target", action="store", help="Test id that is unstable")
+    return parser
+
+
+def parse_args():
+    parser = create_parser()
+    rv = vars(parser.parse_args())
+    check_args(rv)
+    return rv
+
+
+def parse_args_update():
+    parser = create_parser_update()
+    rv = vars(parser.parse_args())
+    check_args_update(rv)
+    return rv
+
+
+def parse_args_reduce():
+    parser = create_parser_reduce()
+    rv = vars(parser.parse_args())
+    check_args(rv)
+    return rv
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptlogging.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptlogging.py
new file mode 100644
index 0000000..1ab6755
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptlogging.py
@@ -0,0 +1,121 @@
+import logging
+import sys
+import threading
+from StringIO import StringIO
+from multiprocessing import Queue
+
+from mozlog import commandline, stdadapter
+
+def setup(args, defaults):
+    logger = commandline.setup_logging("web-platform-tests", args, defaults)
+    setup_stdlib_logger()
+
+    for name in args.keys():
+        if name.startswith("log_"):
+            args.pop(name)
+
+    return logger
+
+
+def setup_stdlib_logger():
+    logging.root.handlers = []
+    logging.root = stdadapter.std_logging_adapter(logging.root)
+
+
+class LogLevelRewriter(object):
+    """Filter that replaces log messages at specified levels with messages
+    at a different level.
+
+    This can be used, for example, to downgrade log messages from ERROR to
+    WARNING in a component where errors are not critical.
+
+    :param inner: Handler to use for messages that pass this filter
+    :param from_levels: List of levels which should be affected
+    :param to_level: Log level to set for the affected messages
+    """
+    def __init__(self, inner, from_levels, to_level):
+        self.inner = inner
+        self.from_levels = [item.upper() for item in from_levels]
+        self.to_level = to_level.upper()
+
+    def __call__(self, data):
+        if data["action"] == "log" and data["level"].upper() in self.from_levels:
+            data = data.copy()
+            data["level"] = self.to_level
+        return self.inner(data)
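+
+# Example usage (illustrative sketch; inner_handler stands for any existing
+# mozlog handler):
+#
+#     handler = LogLevelRewriter(inner_handler, ["error"], "warning")
+#     logger.add_handler(handler)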
+
+
+class LogThread(threading.Thread):
+    def __init__(self, queue, logger, level):
+        self.queue = queue
+        self.log_func = getattr(logger, level)
+        threading.Thread.__init__(self, name="Thread-Log")
+        self.daemon = True
+
+    def run(self):
+        while True:
+            try:
+                msg = self.queue.get()
+            except (EOFError, IOError):
+                break
+            if msg is None:
+                break
+            else:
+                self.log_func(msg)
+
+
+class LoggingWrapper(StringIO):
+    """Wrapper for file like objects to redirect output to logger
+    instead"""
+
+    def __init__(self, queue, prefix=None):
+        StringIO.__init__(self)
+        self.queue = queue
+        self.prefix = prefix
+
+    def write(self, data):
+        if isinstance(data, str):
+            data = data.decode("utf8")
+
+        if data.endswith("\n"):
+            data = data[:-1]
+        if data.endswith("\r"):
+            data = data[:-1]
+        if not data:
+            return
+        if self.prefix is not None:
+            data = "%s: %s" % (self.prefix, data)
+        self.queue.put(data)
+
+    def flush(self):
+        pass
+
+
+class CaptureIO(object):
+    def __init__(self, logger, do_capture):
+        self.logger = logger
+        self.do_capture = do_capture
+        self.logging_queue = None
+        self.logging_thread = None
+        self.original_stdio = None
+
+    def __enter__(self):
+        if self.do_capture:
+            self.original_stdio = (sys.stdout, sys.stderr)
+            self.logging_queue = Queue()
+            self.logging_thread = LogThread(self.logging_queue, self.logger, "info")
+            sys.stdout = LoggingWrapper(self.logging_queue, prefix="STDOUT")
+            sys.stderr = LoggingWrapper(self.logging_queue, prefix="STDERR")
+            self.logging_thread.start()
+
+    def __exit__(self, *args, **kwargs):
+        if self.do_capture:
+            sys.stdout, sys.stderr = self.original_stdio
+            if self.logging_queue is not None:
+                self.logger.info("Closing logging queue")
+                self.logging_queue.put(None)
+                if self.logging_thread is not None:
+                    self.logging_thread.join(10)
+                self.logging_queue.close()
+                self.logger.info("queue closed")
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/__init__.py
new file mode 100644
index 0000000..6b64784
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/__init__.py
@@ -0,0 +1,4 @@
+from serializer import serialize
+from parser import parse
+from backends.static import compile as compile_static
+from backends.conditional import compile as compile_condition
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/__init__.py
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py
new file mode 100644
index 0000000..f0f4ecf
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py
@@ -0,0 +1,330 @@
+import operator
+
+from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode
+from ..parser import parse
+
+
+class ConditionalValue(object):
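+    """Value in the manifest, along with the condition (if any) under which
+    it applies.
+
+    :param node: The AST node holding the value; either a ConditionalNode
+                 (condition plus value) or a bare ValueNode/ListNode
+    :param condition_func: Compiled function taking run_info and returning
+                           True if this value applies"""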
+    def __init__(self, node, condition_func):
+        self.node = node
+        self.condition_func = condition_func
+        if isinstance(node, ConditionalNode):
+            assert len(node.children) == 2
+            self.condition_node = self.node.children[0]
+            self.value_node = self.node.children[1]
+        else:
+            assert isinstance(node, (ValueNode, ListNode))
+            self.condition_node = None
+            self.value_node = self.node
+
+    @property
+    def value(self):
+        if isinstance(self.value_node, ValueNode):
+            return self.value_node.data
+        else:
+            return [item.data for item in self.value_node.children]
+
+    @value.setter
+    def value(self, value):
+        self.value_node.data = value
+
+    def __call__(self, run_info):
+        return self.condition_func(run_info)
+
+    def set_value(self, value):
+        self.value = value
+
+    def remove(self):
+        if len(self.node.parent.children) == 1:
+            self.node.parent.remove()
+        self.node.remove()
+
+
+class Compiler(NodeVisitor):
+    def compile(self, tree, data_cls_getter=None, **kwargs):
+        """Compile a raw AST into a form where conditional expressions
+        are represented by ConditionalValue objects that can be evaluated
+        at runtime.
+
+        tree - The root node of the wptmanifest AST to compile
+
+        data_cls_getter - A function taking two parameters; the previous
+                          output node and the current ast node and returning
+                          the class of the output node to use for the current
+                          ast node
+        """
+        if data_cls_getter is None:
+            self.data_cls_getter = lambda x, y: ManifestItem
+        else:
+            self.data_cls_getter = data_cls_getter
+
+        self.tree = tree
+        self.output_node = self._initial_output_node(tree, **kwargs)
+        self.visit(tree)
+        assert self.output_node is not None
+        return self.output_node
+
+    def compile_condition(self, condition):
+        """Compile a ConditionalNode into a ConditionalValue.
+
+        condition: A ConditionalNode"""
+        data_node = DataNode()
+        key_value_node = KeyValueNode()
+        key_value_node.append(condition.copy())
+        data_node.append(key_value_node)
+        manifest_item = self.compile(data_node)
+        return manifest_item._data[None][0]
+
+    def _initial_output_node(self, node, **kwargs):
+        return self.data_cls_getter(None, None)(node, **kwargs)
+
+    def visit_DataNode(self, node):
+        if node != self.tree:
+            output_parent = self.output_node
+            self.output_node = self.data_cls_getter(self.output_node, node)(node)
+        else:
+            output_parent = None
+
+        assert self.output_node is not None
+
+        for child in node.children:
+            self.visit(child)
+
+        if output_parent is not None:
+            # Append to the parent *after* processing all the node data
+            output_parent.append(self.output_node)
+            self.output_node = self.output_node.parent
+
+        assert self.output_node is not None
+
+    def visit_KeyValueNode(self, node):
+        key_values = []
+        for child in node.children:
+            condition, value = self.visit(child)
+            key_values.append(ConditionalValue(child, condition))
+
+        self.output_node._add_key_value(node, key_values)
+
+    def visit_ListNode(self, node):
+        return (lambda x: True, [self.visit(child) for child in node.children])
+
+    def visit_ValueNode(self, node):
+        return (lambda x: True, node.data)
+
+    def visit_AtomNode(self, node):
+        return (lambda x: True, node.data)
+
+    def visit_ConditionalNode(self, node):
+        return self.visit(node.children[0]), self.visit(node.children[1])
+
+    def visit_StringNode(self, node):
+        indexes = [self.visit(child) for child in node.children]
+
+        def value(x):
+            rv = node.data
+            for index in indexes:
+                rv = rv[index(x)]
+            return rv
+        return value
+
+    def visit_NumberNode(self, node):
+        if "." in node.data:
+            return lambda x: float(node.data)
+        else:
+            return lambda x: int(node.data)
+
+    def visit_VariableNode(self, node):
+        indexes = [self.visit(child) for child in node.children]
+
+        def value(x):
+            data = x[node.data]
+            for index in indexes:
+                data = data[index(x)]
+            return data
+        return value
+
+    def visit_IndexNode(self, node):
+        assert len(node.children) == 1
+        return self.visit(node.children[0])
+
+    def visit_UnaryExpressionNode(self, node):
+        assert len(node.children) == 2
+        operator = self.visit(node.children[0])
+        operand = self.visit(node.children[1])
+
+        return lambda x: operator(operand(x))
+
+    def visit_BinaryExpressionNode(self, node):
+        assert len(node.children) == 3
+        operator = self.visit(node.children[0])
+        operand_0 = self.visit(node.children[1])
+        operand_1 = self.visit(node.children[2])
+
+        assert operand_0 is not None
+        assert operand_1 is not None
+
+        return lambda x: operator(operand_0(x), operand_1(x))
+
+    def visit_UnaryOperatorNode(self, node):
+        return {"not": operator.not_}[node.data]
+
+    def visit_BinaryOperatorNode(self, node):
+        return {"and": operator.and_,
+                "or": operator.or_,
+                "==": operator.eq,
+                "!=": operator.ne}[node.data]
+
+
+class ManifestItem(object):
+    def __init__(self, node=None, **kwargs):
+        self.node = node
+        self.parent = None
+        self.children = []
+        self._data = {}
+
+    def __repr__(self):
+        return "<ManifestItem %s>" % (self.node.data)
+
+    def __str__(self):
+        rv = [repr(self)]
+        for item in self.children:
+            rv.extend("  %s" % line for line in str(item).split("\n"))
+        return "\n".join(rv)
+
+    def __contains__(self, key):
+        return key in self._data
+
+    @property
+    def is_empty(self):
+        if self._data:
+            return False
+        return all(child.is_empty for child in self.children)
+
+    @property
+    def root(self):
+        node = self
+        while node.parent is not None:
+            node = node.parent
+        return node
+
+    @property
+    def name(self):
+        return self.node.data
+
+    def has_key(self, key):
+        for node in [self, self.root]:
+            if key in node._data:
+                return True
+        return False
+
+    def get(self, key, run_info=None):
+        if run_info is None:
+            run_info = {}
+
+        for node in [self, self.root]:
+            if key in node._data:
+                for cond_value in node._data[key]:
+                    try:
+                        matches = cond_value(run_info)
+                    except KeyError:
+                        matches = False
+                    if matches:
+                        return cond_value.value
+        raise KeyError
+
+    def set(self, key, value, condition=None):
+        # First try to update the existing value
+        if key in self._data:
+            cond_values = self._data[key]
+            for cond_value in cond_values:
+                if cond_value.condition_node == condition:
+                    cond_value.value = value
+                    return
+            # If there isn't a conditional match, reuse the existing
+            # KeyValueNode as the parent
+            node = None
+            for child in self.node.children:
+                if child.data == key:
+                    node = child
+                    break
+            assert node is not None
+
+        else:
+            node = KeyValueNode(key)
+            self.node.append(node)
+
+        value_node = ValueNode(value)
+        if condition is not None:
+            conditional_node = ConditionalNode()
+            conditional_node.append(condition)
+            conditional_node.append(value_node)
+            node.append(conditional_node)
+            cond_value = Compiler().compile_condition(conditional_node)
+        else:
+            node.append(value_node)
+            cond_value = ConditionalValue(value_node, lambda x: True)
+
+        # Update the cache of child values. This is pretty annoying and maybe
+        # it should just work directly on the tree
+        if key not in self._data:
+            self._data[key] = []
+        if self._data[key] and self._data[key][-1].condition_node is None:
+            self._data[key].insert(len(self._data[key]) - 1, cond_value)
+        else:
+            self._data[key].append(cond_value)
+
+    def _add_key_value(self, node, values):
+        """Called during construction to set a key-value node"""
+        self._data[node.data] = values
+
+    def append(self, child):
+        self.children.append(child)
+        child.parent = self
+        if child.node.parent != self.node:
+            self.node.append(child.node)
+        return child
+
+    def remove(self):
+        if self.parent:
+            self.parent._remove_child(self)
+
+    def _remove_child(self, child):
+        self.children.remove(child)
+        child.parent = None
+
+    def iterchildren(self, name=None):
+        for item in self.children:
+            if item.name == name or name is None:
+                yield item
+
+    def _flatten(self):
+        rv = {}
+        for node in [self, self.root]:
+            for name, value in node._data.iteritems():
+                if name not in rv:
+                    rv[name] = value
+        return rv
+
+    def iteritems(self):
+        for item in self._flatten().iteritems():
+            yield item
+
+    def iterkeys(self):
+        for item in self._flatten().iterkeys():
+            yield item
+
+    def remove_value(self, key, value):
+        self._data[key].remove(value)
+        if not self._data[key]:
+            del self._data[key]
+        value.remove()
+
+
+def compile_ast(ast, data_cls_getter=None, **kwargs):
+    return Compiler().compile(ast, data_cls_getter=data_cls_getter, **kwargs)
+
+
+def compile(stream, data_cls_getter=None, **kwargs):
+    return compile_ast(parse(stream),
+                       data_cls_getter=data_cls_getter,
+                       **kwargs)
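+
+# Example (illustrative sketch): compile manifest text and evaluate a key
+# against a particular run_info dictionary:
+#
+#     root = compile("[test.html]\n"
+#                    "  expected:\n"
+#                    "    if os == \"win\": FAIL\n"
+#                    "    PASS\n")
+#     test = root.children[0]
+#     test.get("expected", {"os": "win"})    # -> "FAIL"
+#     test.get("expected", {"os": "linux"})  # -> "PASS"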
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/static.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/static.py
new file mode 100644
index 0000000..b2b9fbe
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/backends/static.py
@@ -0,0 +1,220 @@
+import operator
+
+from ..node import NodeVisitor
+from ..parser import parse
+
+
+class Compiler(NodeVisitor):
+    """Compiler backend that evaluates conditional expressions
+    to give static output"""
+
+    def compile(self, tree, expr_data, data_cls_getter=None, **kwargs):
+        """Compile a raw AST into a form with conditional expressions
+        evaluated.
+
+        tree - The root node of the wptmanifest AST to compile
+
+        expr_data - A dictionary of key / value pairs to use when
+                    evaluating conditional expressions
+
+        data_cls_getter - A function taking two parameters; the previous
+                          output node and the current ast node and returning
+                          the class of the output node to use for the current
+                          ast node
+        """
+
+        self._kwargs = kwargs
+        self.expr_data = expr_data
+
+        if data_cls_getter is None:
+            self.data_cls_getter = lambda x, y: ManifestItem
+        else:
+            self.data_cls_getter = data_cls_getter
+
+        self.output_node = None
+        self.visit(tree)
+        return self.output_node
+
+    def visit_DataNode(self, node):
+        output_parent = self.output_node
+        if self.output_node is None:
+            assert node.parent is None
+            self.output_node = self.data_cls_getter(None, None)(None, **self._kwargs)
+        else:
+            self.output_node = self.data_cls_getter(self.output_node, node)(node.data)
+
+        for child in node.children:
+            self.visit(child)
+
+        if output_parent is not None:
+            output_parent.append(self.output_node)
+            self.output_node = self.output_node.parent
+
+    def visit_KeyValueNode(self, node):
+        key_name = node.data
+        key_value = None
+        for child in node.children:
+            value = self.visit(child)
+            if value is not None:
+                key_value = value
+                break
+        if key_value is not None:
+            self.output_node.set(key_name, key_value)
+
+    def visit_ValueNode(self, node):
+        return node.data
+
+    def visit_AtomNode(self, node):
+        return node.data
+
+    def visit_ListNode(self, node):
+        return [self.visit(child) for child in node.children]
+
+    def visit_ConditionalNode(self, node):
+        assert len(node.children) == 2
+        if self.visit(node.children[0]):
+            return self.visit(node.children[1])
+
+    def visit_StringNode(self, node):
+        value = node.data
+        for child in node.children:
+            value = self.visit(child)(value)
+        return value
+
+    def visit_NumberNode(self, node):
+        if "." in node.data:
+            return float(node.data)
+        else:
+            return int(node.data)
+
+    def visit_VariableNode(self, node):
+        value = self.expr_data[node.data]
+        for child in node.children:
+            value = self.visit(child)(value)
+        return value
+
+    def visit_IndexNode(self, node):
+        assert len(node.children) == 1
+        index = self.visit(node.children[0])
+        return lambda x: x[index]
+
+    def visit_UnaryExpressionNode(self, node):
+        assert len(node.children) == 2
+        operator = self.visit(node.children[0])
+        operand = self.visit(node.children[1])
+
+        return operator(operand)
+
+    def visit_BinaryExpressionNode(self, node):
+        assert len(node.children) == 3
+        operator = self.visit(node.children[0])
+        operand_0 = self.visit(node.children[1])
+        operand_1 = self.visit(node.children[2])
+
+        return operator(operand_0, operand_1)
+
+    def visit_UnaryOperatorNode(self, node):
+        return {"not": operator.not_}[node.data]
+
+    def visit_BinaryOperatorNode(self, node):
+        return {"and": operator.and_,
+                "or": operator.or_,
+                "==": operator.eq,
+                "!=": operator.ne}[node.data]
+
+
+class ManifestItem(object):
+    def __init__(self, name, **kwargs):
+        self.parent = None
+        self.name = name
+        self.children = []
+        self._data = {}
+
+    def __repr__(self):
+        return "<ManifestItem %s>" % (self.name)
+
+    def __str__(self):
+        rv = [repr(self)]
+        for item in self.children:
+            rv.extend("  %s" % line for line in str(item).split("\n"))
+        return "\n".join(rv)
+
+    @property
+    def is_empty(self):
+        if self._data:
+            return False
+        return all(child.is_empty for child in self.children)
+
+    @property
+    def root(self):
+        node = self
+        while node.parent is not None:
+            node = node.parent
+        return node
+
+    def has_key(self, key):
+        for node in [self, self.root]:
+            if key in node._data:
+                return True
+        return False
+
+    def get(self, key):
+        for node in [self, self.root]:
+            if key in node._data:
+                return node._data[key]
+        raise KeyError
+
+    def set(self, name, value):
+        self._data[name] = value
+
+    def remove(self):
+        if self.parent:
+            self.parent._remove_child(self)
+
+    def _remove_child(self, child):
+        self.children.remove(child)
+        child.parent = None
+
+    def iterchildren(self, name=None):
+        for item in self.children:
+            if item.name == name or name is None:
+                yield item
+
+    def _flatten(self):
+        rv = {}
+        for node in [self, self.root]:
+            for name, value in node._data.iteritems():
+                if name not in rv:
+                    rv[name] = value
+        return rv
+
+    def iteritems(self):
+        for item in self._flatten().iteritems():
+            yield item
+
+    def iterkeys(self):
+        for item in self._flatten().iterkeys():
+            yield item
+
+    def itervalues(self):
+        for item in self._flatten().itervalues():
+            yield item
+
+    def append(self, child):
+        child.parent = self
+        self.children.append(child)
+        return child
+
+
+def compile_ast(ast, expr_data, data_cls_getter=None, **kwargs):
+    return Compiler().compile(ast,
+                              expr_data,
+                              data_cls_getter=data_cls_getter,
+                              **kwargs)
+
+
+def compile(stream, expr_data, data_cls_getter=None, **kwargs):
+    return compile_ast(parse(stream),
+                       expr_data,
+                       data_cls_getter=data_cls_getter,
+                       **kwargs)
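+
+# Example (illustrative sketch): evaluate the same manifest statically for
+# one platform, so conditions are resolved at compile time:
+#
+#     root = compile("[test.html]\n"
+#                    "  expected:\n"
+#                    "    if os == \"win\": FAIL\n"
+#                    "    PASS\n",
+#                    expr_data={"os": "linux"})
+#     root.children[0].get("expected")  # -> "PASS"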
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/node.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/node.py
new file mode 100644
index 0000000..33e9796
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/node.py
@@ -0,0 +1,157 @@
+class NodeVisitor(object):
+    def visit(self, node):
+        # This is ugly as hell, but we don't have multimethods and
+        # they aren't trivial to fake without access to the class
+        # object from the class body
+        func = getattr(self, "visit_%s" % (node.__class__.__name__))
+        return func(node)
+
+
+class Node(object):
+    def __init__(self, data=None):
+        self.data = data
+        self.parent = None
+        self.children = []
+
+    def append(self, other):
+        other.parent = self
+        self.children.append(other)
+
+    def remove(self):
+        self.parent.children.remove(self)
+
+    def __repr__(self):
+        return "<%s %s>" % (self.__class__.__name__, self.data)
+
+    def __str__(self):
+        rv = [repr(self)]
+        for item in self.children:
+            rv.extend("  %s" % line for line in str(item).split("\n"))
+        return "\n".join(rv)
+
+    def __eq__(self, other):
+        if not (self.__class__ == other.__class__ and
+                self.data == other.data and
+                len(self.children) == len(other.children)):
+            return False
+        for child, other_child in zip(self.children, other.children):
+            if not child == other_child:
+                return False
+        return True
+
+    def copy(self):
+        new = self.__class__(self.data)
+        for item in self.children:
+            new.append(item.copy())
+        return new
+
+
+class DataNode(Node):
+    def append(self, other):
+        # Append that retains the invariant that child data nodes
+        # come after child nodes of other types
+        other.parent = self
+        if isinstance(other, DataNode):
+            self.children.append(other)
+        else:
+            index = len(self.children)
+            while index > 0 and isinstance(self.children[index - 1], DataNode):
+                index -= 1
+            for i in xrange(index):
+                assert other.data != self.children[i].data
+            self.children.insert(index, other)
+
+
+class KeyValueNode(Node):
+    def append(self, other):
+        # Append that retains the invariant that conditional nodes
+        # come before unconditional nodes
+        other.parent = self
+        if isinstance(other, ValueNode):
+            if self.children:
+                assert not isinstance(self.children[-1], ValueNode)
+            self.children.append(other)
+        else:
+            if self.children and isinstance(self.children[-1], ValueNode):
+                self.children.insert(len(self.children) - 1, other)
+            else:
+                self.children.append(other)
+
+
+class ListNode(Node):
+    def append(self, other):
+        other.parent = self
+        self.children.append(other)
+
+
+class ValueNode(Node):
+    def append(self, other):
+        raise TypeError
+
+
+class AtomNode(ValueNode):
+    pass
+
+
+class ConditionalNode(Node):
+    pass
+
+
+class UnaryExpressionNode(Node):
+    def __init__(self, operator, operand):
+        Node.__init__(self)
+        self.append(operator)
+        self.append(operand)
+
+    def append(self, other):
+        Node.append(self, other)
+        assert len(self.children) <= 2
+
+    def copy(self):
+        new = self.__class__(self.children[0].copy(),
+                             self.children[1].copy())
+        return new
+
+
+class BinaryExpressionNode(Node):
+    def __init__(self, operator, operand_0, operand_1):
+        Node.__init__(self)
+        self.append(operator)
+        self.append(operand_0)
+        self.append(operand_1)
+
+    def append(self, other):
+        Node.append(self, other)
+        assert len(self.children) <= 3
+
+    def copy(self):
+        new = self.__class__(self.children[0].copy(),
+                             self.children[1].copy(),
+                             self.children[2].copy())
+        return new
+
+
+class UnaryOperatorNode(Node):
+    def append(self, other):
+        raise TypeError
+
+
+class BinaryOperatorNode(Node):
+    def append(self, other):
+        raise TypeError
+
+
+class IndexNode(Node):
+    pass
+
+
+class VariableNode(Node):
+    pass
+
+
+class StringNode(Node):
+    pass
+
+
+class NumberNode(ValueNode):
+    pass
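+
+
+# Example (illustrative sketch): KeyValueNode.append keeps conditional
+# children ahead of the unconditional default value:
+#
+#     kv = KeyValueNode("expected")
+#     kv.append(ValueNode("PASS"))  # unconditional default
+#     kv.append(ConditionalNode())  # inserted before the default
+#     [type(c).__name__ for c in kv.children]
+#     # -> ["ConditionalNode", "ValueNode"]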
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/parser.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/parser.py
new file mode 100644
index 0000000..d84a914
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/parser.py
@@ -0,0 +1,746 @@
+#default_value:foo
+#include: other.manifest
+#
+#[test_name.js]
+#  expected: ERROR
+#
+#  [subtest 1]
+#    expected:
+#      os == win: FAIL #This is a comment
+#      PASS
+#
+
+# TODO: keep comments in the tree
+
+from __future__ import unicode_literals
+
+import types
+from cStringIO import StringIO
+
+from node import *
+
+
+class ParseError(Exception):
+    def __init__(self, filename, line, detail):
+        self.line = line
+        self.filename = filename
+        self.detail = detail
+        self.message = "%s: %s line %s" % (self.detail, self.filename, self.line)
+        Exception.__init__(self, self.message)
+
+# Sentinel values used by the tokenizer
+eol = object()
+group_start = object()
+group_end = object()
+digits = "0123456789"
+open_parens = "[("
+close_parens = "])"
+parens = open_parens + close_parens
+operator_chars = "=!"
+
+unary_operators = ["not"]
+binary_operators = ["==", "!=", "and", "or"]
+
+operators = ["==", "!=", "not", "and", "or"]
+
+atoms = {"True": True,
+         "False": False,
+         "Reset": object()}
+
+def decode(s):
+    assert isinstance(s, unicode)
+    return s
+
+
+def precedence(operator_node):
+    return len(operators) - operators.index(operator_node.data)
+
+
+class TokenTypes(object):
+    def __init__(self):
+        for type in ["group_start", "group_end", "paren", "list_start", "list_end", "separator", "ident", "string", "number", "atom", "eof"]:
+            setattr(self, type, type)
+
+token_types = TokenTypes()
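+
+# Example (illustrative sketch): tokenizing a single key/value line with the
+# Tokenizer defined below:
+#
+#     tokens = Tokenizer().tokenize("key: value\n")
+#     tokens.next()  # -> (token_types.string, u"key")
+#     tokens.next()  # -> (token_types.separator, ":")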
+
+
+class Tokenizer(object):
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self.indent_levels = [0]
+        self.state = self.line_start_state
+        self.next_state = self.data_line_state
+        self.line_number = 0
+
+    def tokenize(self, stream):
+        self.reset()
+        assert not isinstance(stream, unicode)
+        if isinstance(stream, str):
+            stream = StringIO(stream)
+        if not hasattr(stream, "name"):
+            self.filename = ""
+        else:
+            self.filename = stream.name
+
+        self.next_line_state = self.line_start_state
+        for i, line in enumerate(stream):
+            assert isinstance(line, str)
+            self.state = self.next_line_state
+            assert self.state is not None
+            states = []
+            self.next_line_state = None
+            self.line_number = i + 1
+            self.index = 0
+            self.line = line.decode('utf-8').rstrip()
+            assert isinstance(self.line, unicode)
+            while self.state != self.eol_state:
+                states.append(self.state)
+                tokens = self.state()
+                if tokens:
+                    for token in tokens:
+                        yield token
+            self.state()
+        while True:
+            yield (token_types.eof, None)
+
+    def char(self):
+        if self.index == len(self.line):
+            return eol
+        return self.line[self.index]
+
+    def consume(self):
+        if self.index < len(self.line):
+            self.index += 1
+
+    def peek(self, length):
+        return self.line[self.index:self.index + length]
+
+    def skip_whitespace(self):
+        while self.char() == " ":
+            self.consume()
+
+    def eol_state(self):
+        if self.next_line_state is None:
+            self.next_line_state = self.line_start_state
+
+    def line_start_state(self):
+        self.skip_whitespace()
+        if self.char() == eol:
+            self.state = self.eol_state
+            return
+        if self.index > self.indent_levels[-1]:
+            self.indent_levels.append(self.index)
+            yield (token_types.group_start, None)
+        else:
+            while self.index < self.indent_levels[-1]:
+                self.indent_levels.pop()
+                yield (token_types.group_end, None)
+                # This is terrible; if we were parsing an expression then
+                # next_state will be expr_or_value_state, but after a deindent
+                # the next line must always be a heading or a key, so we go
+                # back to data_line_state
+                self.next_state = self.data_line_state
+            if self.index != self.indent_levels[-1]:
+                raise ParseError(self.filename, self.line_number, "Unexpected indent")
+
+        self.state = self.next_state
+
+    def data_line_state(self):
+        if self.char() == "[":
+            yield (token_types.paren, self.char())
+            self.consume()
+            self.state = self.heading_state
+        else:
+            self.state = self.key_state
+
+    def heading_state(self):
+        rv = ""
+        while True:
+            c = self.char()
+            if c == "\\":
+                rv += self.consume_escape()
+            elif c == "]":
+                break
+            elif c == eol:
+                raise ParseError(self.filename, self.line_number, "EOL in heading")
+            else:
+                rv += c
+                self.consume()
+
+        yield (token_types.string, decode(rv))
+        yield (token_types.paren, "]")
+        self.consume()
+        self.state = self.line_end_state
+        self.next_state = self.data_line_state
+
+    def key_state(self):
+        rv = ""
+        while True:
+            c = self.char()
+            if c == " ":
+                self.skip_whitespace()
+                if self.char() != ":":
+                    raise ParseError(self.filename, self.line_number, "Space in key name")
+                break
+            elif c == ":":
+                break
+            elif c == eol:
+                raise ParseError(self.filename, self.line_number, "EOL in key name (missing ':'?)")
+            elif c == "\\":
+                rv += self.consume_escape()
+            else:
+                rv += c
+                self.consume()
+        yield (token_types.string, decode(rv))
+        yield (token_types.separator, ":")
+        self.consume()
+        self.state = self.after_key_state
+
+    def after_key_state(self):
+        self.skip_whitespace()
+        c = self.char()
+        if c == "#":
+            self.next_state = self.expr_or_value_state
+            self.state = self.comment_state
+        elif c == eol:
+            self.next_state = self.expr_or_value_state
+            self.state = self.eol_state
+        elif c == "[":
+            self.state = self.list_start_state
+        else:
+            self.state = self.value_state
+
+    def list_start_state(self):
+        yield (token_types.list_start, "[")
+        self.consume()
+        self.state = self.list_value_start_state
+
+    def list_value_start_state(self):
+        self.skip_whitespace()
+        if self.char() == "]":
+            self.state = self.list_end_state
+        elif self.char() in ("'", '"'):
+            quote_char = self.char()
+            self.consume()
+            yield (token_types.string, self.consume_string(quote_char))
+            self.skip_whitespace()
+            if self.char() == "]":
+                self.state = self.list_end_state
+            elif self.char() != ",":
+                raise ParseError(self.filename, self.line_number, "Junk after quoted string")
+            self.consume()
+        elif self.char() == "#":
+            self.state = self.comment_state
+            self.next_line_state = self.list_value_start_state
+        elif self.char() == eol:
+            self.next_line_state = self.list_value_start_state
+            self.state = self.eol_state
+        elif self.char() == ",":
+            raise ParseError(self.filename, self.line_number, "List item started with separator")
+        elif self.char() == "@":
+            self.state = self.list_value_atom_state
+        else:
+            self.state = self.list_value_state
+
+    def list_value_state(self):
+        rv = ""
+        spaces = 0
+        while True:
+            c = self.char()
+            if c == "\\":
+                escape = self.consume_escape()
+                rv += escape
+            elif c == eol:
+                raise ParseError(self.filename, self.line_number, "EOL in list value")
+            elif c == "#":
+                raise ParseError(self.filename, self.line_number, "EOL in list value (comment)")
+            elif c == ",":
+                self.state = self.list_value_start_state
+                self.consume()
+                break
+            elif c == " ":
+                spaces += 1
+                self.consume()
+            elif c == "]":
+                self.state = self.list_end_state
+                self.consume()
+                break
+            else:
+                rv += " " * spaces
+                spaces = 0
+                rv += c
+                self.consume()
+
+        if rv:
+            yield (token_types.string, decode(rv))
+
+    def list_value_atom_state(self):
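+        # an "@"-prefixed bare word is an atom (e.g. @True); reuse
+        # list_value_state and retag the resulting string token as an atom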
+        self.consume()
+        for _, value in self.list_value_state():
+            yield token_types.atom, value
+
+    def list_end_state(self):
+        self.consume()
+        yield (token_types.list_end, "]")
+        self.state = self.line_end_state
+
+    def value_state(self):
+        self.skip_whitespace()
+        if self.char() in ("'", '"'):
+            quote_char = self.char()
+            self.consume()
+            yield (token_types.string, self.consume_string(quote_char))
+            if self.char() == "#":
+                self.state = self.comment_state
+            else:
+                self.state = self.line_end_state
+        elif self.char() == "@":
+            self.consume()
+            for _, value in self.value_inner_state():
+                yield token_types.atom, value
+        else:
+            self.state = self.value_inner_state
+
+    def value_inner_state(self):
+        rv = ""
+        spaces = 0
+        while True:
+            c = self.char()
+            if c == "\\":
+                rv += self.consume_escape()
+            elif c == "#":
+                self.state = self.comment_state
+                break
+            elif c == " ":
+                # prevent whitespace before comments from being included in the value
+                spaces += 1
+                self.consume()
+            elif c == eol:
+                self.state = self.line_end_state
+                break
+            else:
+                rv += " " * spaces
+                spaces = 0
+                rv += c
+                self.consume()
+        yield (token_types.string, decode(rv))
+
+    def comment_state(self):
+        while self.char() is not eol:
+            self.consume()
+        self.state = self.eol_state
+
+    def line_end_state(self):
+        self.skip_whitespace()
+        c = self.char()
+        if c == "#":
+            self.state = self.comment_state
+        elif c == eol:
+            self.state = self.eol_state
+        else:
+            raise ParseError(self.filename, self.line_number, "Junk before EOL %s" % c)
+
+    def consume_string(self, quote_char):
+        rv = ""
+        while True:
+            c = self.char()
+            if c == "\\":
+                rv += self.consume_escape()
+            elif c == quote_char:
+                self.consume()
+                break
+            elif c == eol:
+                raise ParseError(self.filename, self.line_number, "EOL in quoted string")
+            else:
+                rv += c
+                self.consume()
+
+        return decode(rv)
+
+    def expr_or_value_state(self):
+        if self.peek(3) == "if ":
+            self.state = self.expr_state
+        else:
+            self.state = self.value_state
+
+    def expr_state(self):
+        self.skip_whitespace()
+        c = self.char()
+        if c == eol:
+            raise ParseError(self.filename, self.line_number, "EOL in expression")
+        elif c in "'\"":
+            self.consume()
+            yield (token_types.string, self.consume_string(c))
+        elif c == "#":
+            raise ParseError(self.filename, self.line_number, "Comment before end of expression")
+        elif c == ":":
+            yield (token_types.separator, c)
+            self.consume()
+            self.state = self.value_state
+        elif c in parens:
+            self.consume()
+            yield (token_types.paren, c)
+        elif c in ("!", "="):
+            self.state = self.operator_state
+        elif c in digits:
+            self.state = self.digit_state
+        else:
+            self.state = self.ident_state
+
+    def operator_state(self):
+        # Only symbolic operators ("==", "!=", etc.) are consumed here; word
+        # operators such as "and" and "not" are tokenized as idents
+        index_0 = self.index
+        while True:
+            c = self.char()
+            if c == eol:
+                break
+            elif c in operator_chars:
+                self.consume()
+            else:
+                self.state = self.expr_state
+                break
+        yield (token_types.ident, self.line[index_0:self.index])
+
+    def digit_state(self):
+        index_0 = self.index
+        seen_dot = False
+        while True:
+            c = self.char()
+            if c == eol:
+                break
+            elif c in digits:
+                self.consume()
+            elif c == ".":
+                if seen_dot:
+                    raise ParseError(self.filename, self.line_number, "Invalid number")
+                self.consume()
+                seen_dot = True
+            elif c in parens:
+                break
+            elif c in operator_chars:
+                break
+            elif c == " ":
+                break
+            elif c == ":":
+                break
+            else:
+                raise ParseError(self.filename, self.line_number, "Invalid character in number")
+
+        self.state = self.expr_state
+        yield (token_types.number, self.line[index_0:self.index])
+
+    def ident_state(self):
+        index_0 = self.index
+        while True:
+            c = self.char()
+            if c == eol:
+                break
+            elif c == ".":
+                break
+            elif c in parens:
+                break
+            elif c in operator_chars:
+                break
+            elif c == " ":
+                break
+            elif c == ":":
+                break
+            else:
+                self.consume()
+        self.state = self.expr_state
+        yield (token_types.ident, self.line[index_0:self.index])
+
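+    # Consume a backslash escape and return the character it denotes:
+    # \xNN, \uNNNN and \UNNNNNN are fixed-length hex escapes, the named
+    # escapes (\n, \t, ...) map to their control characters, and any other
+    # character escapes to itself.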
+    def consume_escape(self):
+        assert self.char() == "\\"
+        self.consume()
+        c = self.char()
+        self.consume()
+        if c == "x":
+            return self.decode_escape(2)
+        elif c == "u":
+            return self.decode_escape(4)
+        elif c == "U":
+            return self.decode_escape(6)
+        elif c in ["a", "b", "f", "n", "r", "t", "v"]:
+            # map the named escapes to their control characters directly
+            # instead of round-tripping through eval()
+            return {"a": "\a", "b": "\b", "f": "\f", "n": "\n",
+                    "r": "\r", "t": "\t", "v": "\v"}[c]
+        elif c is eol:
+            raise ParseError(self.filename, self.line_number, "EOL in escape")
+        else:
+            return c
+
+    def decode_escape(self, length):
+        value = 0
+        for i in xrange(length):
+            c = self.char()
+            value *= 16
+            value += self.escape_value(c)
+            self.consume()
+
+        return unichr(value)
+
+    def escape_value(self, c):
+        if '0' <= c <= '9':
+            return ord(c) - ord('0')
+        elif 'a' <= c <= 'f':
+            return ord(c) - ord('a') + 10
+        elif 'A' <= c <= 'F':
+            return ord(c) - ord('A') + 10
+        else:
+            raise ParseError(self.filename, self.line_number, "Invalid character escape")
+
+
+class Parser(object):
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self.token = None
+        self.unary_operators = "!"
+        self.binary_operators = frozenset(["&&", "||", "=="])
+        self.tokenizer = Tokenizer()
+        self.token_generator = None
+        self.tree = Treebuilder(DataNode(None))
+        self.expr_builder = None
+        self.expr_builders = []
+
+    def parse(self, input):
+        self.reset()
+        self.token_generator = self.tokenizer.tokenize(input)
+        self.consume()
+        self.manifest()
+        return self.tree.node
+
+    def consume(self):
+        self.token = self.token_generator.next()
+
+    def expect(self, type, value=None):
+        if self.token[0] != type:
+            raise ParseError
+        if value is not None:
+            if self.token[1] != value:
+                raise ParseError
+
+        self.consume()
+
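+    # Informal grammar sketch:
+    #   manifest   := data_block EOF
+    #   data_block := (string ":" value_block)*
+    #                 ("[" string "]" (group_start data_block (group_end | EOF))?)*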
+    def manifest(self):
+        self.data_block()
+        self.expect(token_types.eof)
+
+    def data_block(self):
+        while self.token[0] == token_types.string:
+            self.tree.append(KeyValueNode(self.token[1]))
+            self.consume()
+            self.expect(token_types.separator)
+            self.value_block()
+            self.tree.pop()
+
+        while self.token == (token_types.paren, "["):
+            self.consume()
+            if self.token[0] != token_types.string:
+                raise ParseError
+            self.tree.append(DataNode(self.token[1]))
+            self.consume()
+            self.expect(token_types.paren, "]")
+            if self.token[0] == token_types.group_start:
+                self.consume()
+                self.data_block()
+                self.eof_or_end_group()
+            self.tree.pop()
+
+    def eof_or_end_group(self):
+        if self.token[0] != token_types.eof:
+            self.expect(token_types.group_end)
+
+    def value_block(self):
+        if self.token[0] == token_types.list_start:
+            self.consume()
+            self.list_value()
+        elif self.token[0] == token_types.string:
+            self.value()
+        elif self.token[0] == token_types.group_start:
+            self.consume()
+            self.expression_values()
+            if self.token[0] == token_types.string:
+                self.value()
+            self.eof_or_end_group()
+        elif self.token[0] == token_types.atom:
+            self.atom()
+        else:
+            raise ParseError
+
+    def list_value(self):
+        self.tree.append(ListNode())
+        while self.token[0] in (token_types.atom, token_types.string):
+            if self.token[0] == token_types.atom:
+                self.atom()
+            else:
+                self.value()
+        self.expect(token_types.list_end)
+        self.tree.pop()
+
+    def expression_values(self):
+        while self.token == (token_types.ident, "if"):
+            self.consume()
+            self.tree.append(ConditionalNode())
+            self.expr_start()
+            self.expect(token_types.separator)
+            if self.token[0] == token_types.string:
+                self.value()
+            else:
+                raise ParseError
+            self.tree.pop()
+
+    def value(self):
+        self.tree.append(ValueNode(self.token[1]))
+        self.consume()
+        self.tree.pop()
+
+    def atom(self):
+        if self.token[1] not in atoms:
+            raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Unrecognised symbol @%s" % self.token[1])
+        self.tree.append(AtomNode(atoms[self.token[1]]))
+        self.consume()
+        self.tree.pop()
+
+    def expr_start(self):
+        self.expr_builder = ExpressionBuilder(self.tokenizer)
+        self.expr_builders.append(self.expr_builder)
+        self.expr()
+        expression = self.expr_builder.finish()
+        self.expr_builders.pop()
+        self.expr_builder = self.expr_builders[-1] if self.expr_builders else None
+        if self.expr_builder:
+            self.expr_builder.operands[-1].children[-1].append(expression)
+        else:
+            self.tree.append(expression)
+            self.tree.pop()
+
+    def expr(self):
+        self.expr_operand()
+        while (self.token[0] == token_types.ident and self.token[1] in binary_operators):
+            self.expr_bin_op()
+            self.expr_operand()
+
+    def expr_operand(self):
+        if self.token == (token_types.paren, "("):
+            self.consume()
+            self.expr_builder.left_paren()
+            self.expr()
+            self.expect(token_types.paren, ")")
+            self.expr_builder.right_paren()
+        elif self.token[0] == token_types.ident and self.token[1] in unary_operators:
+            self.expr_unary_op()
+            self.expr_operand()
+        elif self.token[0] in [token_types.string, token_types.ident]:
+            self.expr_value()
+        elif self.token[0] == token_types.number:
+            self.expr_number()
+        else:
+            raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Unrecognised operand")
+
+    def expr_unary_op(self):
+        if self.token[1] in unary_operators:
+            self.expr_builder.push_operator(UnaryOperatorNode(self.token[1]))
+            self.consume()
+        else:
+            raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Expected unary operator")
+
+    def expr_bin_op(self):
+        if self.token[1] in binary_operators:
+            self.expr_builder.push_operator(BinaryOperatorNode(self.token[1]))
+            self.consume()
+        else:
+            raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Expected binary operator")
+
+    def expr_value(self):
+        node_type = {token_types.string: StringNode,
+                     token_types.ident: VariableNode}[self.token[0]]
+        self.expr_builder.push_operand(node_type(self.token[1]))
+        self.consume()
+        if self.token == (token_types.paren, "["):
+            self.consume()
+            self.expr_builder.operands[-1].append(IndexNode())
+            self.expr_start()
+            self.expect(token_types.paren, "]")
+
+    def expr_number(self):
+        self.expr_builder.push_operand(NumberNode(self.token[1]))
+        self.consume()
+
+
+class Treebuilder(object):
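+    # Maintains a cursor into the tree under construction: append() adds a
+    # child node and descends into it, pop() moves back up to its parent.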
+    def __init__(self, root):
+        self.root = root
+        self.node = root
+
+    def append(self, node):
+        self.node.append(node)
+        self.node = node
+        return node
+
+    def pop(self):
+        node = self.node
+        self.node = self.node.parent
+        return node
+
+
+class ExpressionBuilder(object):
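+    # Shunting-yard style builder: operators wait on a stack and are popped
+    # into expression nodes when a lower-precedence operator, a closing
+    # paren, or the end of the expression arrives; None marks the start of
+    # an expression or an open paren.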
+    def __init__(self, tokenizer):
+        self.operands = []
+        self.operators = [None]
+        self.tokenizer = tokenizer
+
+    def finish(self):
+        while self.operators[-1] is not None:
+            self.pop_operator()
+        rv = self.pop_operand()
+        assert self.is_empty()
+        return rv
+
+    def left_paren(self):
+        self.operators.append(None)
+
+    def right_paren(self):
+        while self.operators[-1] is not None:
+            self.pop_operator()
+            if not self.operators:
+                raise ParseError(self.tokenizer.filename,
+                                 self.tokenizer.line_number,
+                                 "Unbalanced parens")
+
+        assert self.operators.pop() is None
+
+    def push_operator(self, operator):
+        assert operator is not None
+        while self.precedence(self.operators[-1]) > self.precedence(operator):
+            self.pop_operator()
+
+        self.operators.append(operator)
+
+    def pop_operator(self):
+        operator = self.operators.pop()
+        if isinstance(operator, BinaryOperatorNode):
+            operand_1 = self.operands.pop()
+            operand_0 = self.operands.pop()
+            self.operands.append(BinaryExpressionNode(operator, operand_0, operand_1))
+        else:
+            operand_0 = self.operands.pop()
+            self.operands.append(UnaryExpressionNode(operator, operand_0))
+
+    def push_operand(self, node):
+        self.operands.append(node)
+
+    def pop_operand(self):
+        return self.operands.pop()
+
+    def is_empty(self):
+        return len(self.operands) == 0 and all(item is None for item in self.operators)
+
+    def precedence(self, operator):
+        if operator is None:
+            return 0
+        return precedence(operator)
+
+
+def parse(stream):
+    p = Parser()
+    return p.parse(stream)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py
new file mode 100644
index 0000000..52203ab
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py
@@ -0,0 +1,136 @@
+from node import NodeVisitor, ValueNode, ListNode, BinaryExpressionNode
+from parser import atoms, precedence
+
+atom_names = {v:"@%s" % k for (k,v) in atoms.iteritems()}
+
+named_escapes = set(["\a", "\b", "\f", "\n", "\r", "\t", "\v"])
+
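+# Backslash-escape control characters, backslashes and any characters in
+# `extras`, returning a utf-8 encoded byte string; e.g. escape(u"a]b",
+# extras="]") gives "a\\]b".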
+def escape(string, extras=""):
+    rv = ""
+    for c in string:
+        if c in named_escapes:
+            rv += c.encode("unicode_escape")
+        elif c == "\\":
+            rv += "\\\\"
+        elif c < '\x20':
+            rv += "\\x%02x" % ord(c)
+        elif c in extras:
+            rv += "\\" + c
+        else:
+            rv += c
+    return rv.encode("utf8")
+
+
+class ManifestSerializer(NodeVisitor):
+    def __init__(self, skip_empty_data=False):
+        self.skip_empty_data = skip_empty_data
+
+    def serialize(self, root):
+        self.indent = 2
+        rv = "\n".join(self.visit(root))
+        if not rv.endswith("\n"):
+            rv = rv + "\n"
+        return rv
+
+    def visit_DataNode(self, node):
+        rv = []
+        if not self.skip_empty_data or node.children:
+            if node.data:
+                rv.append("[%s]" % escape(node.data, extras="]"))
+                indent = self.indent * " "
+            else:
+                indent = ""
+
+            for child in node.children:
+                rv.extend("%s%s" % (indent if item else "", item) for item in self.visit(child))
+
+            if node.parent:
+                rv.append("")
+
+        return rv
+
+    def visit_KeyValueNode(self, node):
+        rv = [escape(node.data, ":") + ":"]
+        indent = " " * self.indent
+
+        if len(node.children) == 1 and isinstance(node.children[0], (ValueNode, ListNode)):
+            rv[0] += " %s" % self.visit(node.children[0])[0]
+        else:
+            for child in node.children:
+                rv.append(indent + self.visit(child)[0])
+
+        return rv
+
+    def visit_ListNode(self, node):
+        rv = ["["]
+        rv.append(", ".join(self.visit(child)[0] for child in node.children))
+        rv.append("]")
+        return ["".join(rv)]
+
+    def visit_ValueNode(self, node):
+        if "#" in node.data or (isinstance(node.parent, ListNode) and
+                                ("," in node.data or "]" in node.data)):
+            if "\"" in node.data:
+                quote = "'"
+            else:
+                quote = "\""
+        else:
+            quote = ""
+        return [quote + escape(node.data, extras=quote) + quote]
+
+    def visit_AtomNode(self, node):
+        return [atom_names[node.data]]
+
+    def visit_ConditionalNode(self, node):
+        return ["if %s: %s" % tuple(self.visit(item)[0] for item in node.children)]
+
+    def visit_StringNode(self, node):
+        rv = ["\"%s\"" % escape(node.data, extras="\"")]
+        for child in node.children:
+            rv[0] += self.visit(child)[0]
+        return rv
+
+    def visit_NumberNode(self, node):
+        return [str(node.data)]
+
+    def visit_VariableNode(self, node):
+        rv = escape(node.data)
+        for child in node.children:
+            rv += self.visit(child)
+        return [rv]
+
+    def visit_IndexNode(self, node):
+        assert len(node.children) == 1
+        return ["[%s]" % self.visit(node.children[0])[0]]
+
+    def visit_UnaryExpressionNode(self, node):
+        children = []
+        for child in node.children:
+            child_str = self.visit(child)[0]
+            if isinstance(child, BinaryExpressionNode):
+                child_str = "(%s)" % child_str
+            children.append(child_str)
+        return [" ".join(children)]
+
+    def visit_BinaryExpressionNode(self, node):
+        assert len(node.children) == 3
+        children = []
+        for child_index in [1, 0, 2]:
+            child = node.children[child_index]
+            child_str = self.visit(child)[0]
+            if (isinstance(child, BinaryExpressionNode) and
+                precedence(node.children[0]) < precedence(child.children[0])):
+                child_str = "(%s)" % child_str
+            children.append(child_str)
+        return [" ".join(children)]
+
+    def visit_UnaryOperatorNode(self, node):
+        return [str(node.data)]
+
+    def visit_BinaryOperatorNode(self, node):
+        return [str(node.data)]
+
+
+def serialize(tree, *args, **kwargs):
+    s = ManifestSerializer(*args, **kwargs)
+    return s.serialize(tree)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/__init__.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/__init__.py
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py
new file mode 100644
index 0000000..d9ffdf2
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py
@@ -0,0 +1,143 @@
+import unittest
+
+from cStringIO import StringIO
+
+from ..backends import conditional
+from ..node import BinaryExpressionNode, BinaryOperatorNode, VariableNode, NumberNode
+
+
+class TestConditional(unittest.TestCase):
+    def compile(self, input_text):
+        return conditional.compile(input_text)
+
+    def test_get_0(self):
+        data = """
+key: value
+
+[Heading 1]
+  other_key:
+    if a == 1: value_1
+    if a == 2: value_2
+    value_3
+"""
+
+        manifest = self.compile(data)
+
+        self.assertEquals(manifest.get("key"), "value")
+        children = list(item for item in manifest.iterchildren())
+        self.assertEquals(len(children), 1)
+        section = children[0]
+        self.assertEquals(section.name, "Heading 1")
+
+        self.assertEquals(section.get("other_key", {"a": 1}), "value_1")
+        self.assertEquals(section.get("other_key", {"a": 2}), "value_2")
+        self.assertEquals(section.get("other_key", {"a": 7}), "value_3")
+        self.assertEquals(section.get("key"), "value")
+
+    def test_get_1(self):
+        data = """
+key: value
+
+[Heading 1]
+  other_key:
+    if a == "1": value_1
+    if a == 2: value_2
+    value_3
+"""
+
+        manifest = self.compile(data)
+
+        children = list(item for item in manifest.iterchildren())
+        section = children[0]
+
+        self.assertEquals(section.get("other_key", {"a": "1"}), "value_1")
+        self.assertEquals(section.get("other_key", {"a": 1}), "value_3")
+
+    def test_get_2(self):
+        data = """
+key:
+  if a[1] == "b": value_1
+  if a[1] == 2: value_2
+  value_3
+"""
+
+        manifest = self.compile(data)
+
+        self.assertEquals(manifest.get("key", {"a": "ab"}), "value_1")
+        self.assertEquals(manifest.get("key", {"a": [1, 2]}), "value_2")
+
+    def test_get_3(self):
+        data = """
+key:
+  if a[1] == "ab"[1]: value_1
+  if a[1] == 2: value_2
+  value_3
+"""
+
+        manifest = self.compile(data)
+
+        self.assertEquals(manifest.get("key", {"a": "ab"}), "value_1")
+        self.assertEquals(manifest.get("key", {"a": [1, 2]}), "value_2")
+
+    def test_set_0(self):
+        data = """
+key:
+  if a == "a": value_1
+  if a == "b": value_2
+  value_3
+"""
+        manifest = self.compile(data)
+
+        manifest.set("new_key", "value_new")
+
+        self.assertEquals(manifest.get("new_key"), "value_new")
+
+    def test_set_1(self):
+        data = """
+key:
+  if a == "a": value_1
+  if a == "b": value_2
+  value_3
+"""
+
+        manifest = self.compile(data)
+
+        manifest.set("key", "value_new")
+
+        self.assertEquals(manifest.get("key"), "value_new")
+        self.assertEquals(manifest.get("key", {"a": "a"}), "value_1")
+
+    def test_set_2(self):
+        data = """
+key:
+  if a == "a": value_1
+  if a == "b": value_2
+  value_3
+"""
+
+        manifest = self.compile(data)
+
+        expr = BinaryExpressionNode(BinaryOperatorNode("=="),
+                                    VariableNode("a"),
+                                    NumberNode("1"))
+
+        manifest.set("key", "value_new", expr)
+
+        self.assertEquals(manifest.get("key", {"a": 1}), "value_new")
+        self.assertEquals(manifest.get("key", {"a": "a"}), "value_1")
+
+    def test_api_0(self):
+        data = """
+key:
+  if a == 1.5: value_1
+  value_2
+key_1: other_value
+"""
+        manifest = self.compile(data)
+
+        self.assertFalse(manifest.is_empty)
+        self.assertEquals(manifest.root, manifest)
+        self.assertTrue(manifest.has_key("key_1"))
+        self.assertFalse(manifest.has_key("key_2"))
+
+        self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py
new file mode 100644
index 0000000..765c984
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py
@@ -0,0 +1,75 @@
+import unittest
+
+from cStringIO import StringIO
+
+from .. import parser
+
+# There aren't many tests here because it turns out to be way more convenient to
+# use test_serializer for the majority of cases
+
+
+class TestExpression(unittest.TestCase):
+    def setUp(self):
+        self.parser = parser.Parser()
+
+    def parse(self, input_str):
+        return self.parser.parse(StringIO(input_str))
+
+    def compare(self, input_text, expected):
+        actual = self.parse(input_text)
+        self.match(expected, actual)
+
+    def match(self, expected_node, actual_node):
+        self.assertEquals(expected_node[0], actual_node.__class__.__name__)
+        self.assertEquals(expected_node[1], actual_node.data)
+        self.assertEquals(len(expected_node[2]), len(actual_node.children))
+        for expected_child, actual_child in zip(expected_node[2], actual_node.children):
+            self.match(expected_child, actual_child)
+
+    def test_expr_0(self):
+        self.compare(
+            """
+key:
+  if x == 1 : value""",
+            ["DataNode", None,
+             [["KeyValueNode", "key",
+               [["ConditionalNode", None,
+                 [["BinaryExpressionNode", None,
+                   [["BinaryOperatorNode", "==", []],
+                    ["VariableNode", "x", []],
+                       ["NumberNode", "1", []]
+                    ]],
+                     ["ValueNode", "value", []],
+                  ]]]]]]
+        )
+
+    def test_expr_1(self):
+        self.compare(
+            """
+key:
+  if not x and y : value""",
+            ["DataNode", None,
+             [["KeyValueNode", "key",
+               [["ConditionalNode", None,
+                 [["BinaryExpressionNode", None,
+                   [["BinaryOperatorNode", "and", []],
+                    ["UnaryExpressionNode", None,
+                       [["UnaryOperatorNode", "not", []],
+                        ["VariableNode", "x", []]
+                        ]],
+                       ["VariableNode", "y", []]
+                    ]],
+                     ["ValueNode", "value", []],
+                  ]]]]]]
+        )
+
+    def test_atom_0(self):
+        with self.assertRaises(parser.ParseError):
+            self.parse("key: @Unknown")
+
+    def test_atom_1(self):
+        with self.assertRaises(parser.ParseError):
+            self.parse("key: @true")
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py
new file mode 100644
index 0000000..6db2cbb
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py
@@ -0,0 +1,227 @@
+import sys
+import unittest
+
+from cStringIO import StringIO
+
+import pytest
+
+from .. import parser, serializer
+
+
+class SerializerTest(unittest.TestCase):
+    def setUp(self):
+        self.serializer = serializer.ManifestSerializer()
+        self.parser = parser.Parser()
+
+    def serialize(self, input_str):
+        return self.serializer.serialize(self.parser.parse(input_str))
+
+    def compare(self, input_str, expected=None):
+        if expected is None:
+            expected = input_str
+        expected = expected.encode("utf8")
+        actual = self.serialize(input_str)
+        self.assertEquals(actual, expected)
+
+    def test_0(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key: other_value
+""")
+
+    def test_1(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if a or b: other_value
+""")
+
+    def test_2(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if a or b: other_value
+    fallback_value
+""")
+
+    def test_3(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if a == 1: other_value
+    fallback_value
+""")
+
+    def test_4(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if a == "1": other_value
+    fallback_value
+""")
+
+    def test_5(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if a == "abc"[1]: other_value
+    fallback_value
+""")
+
+    def test_6(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if a == "abc"[c]: other_value
+    fallback_value
+""")
+
+    def test_7(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if (a or b) and c: other_value
+    fallback_value
+""",
+"""key: value
+[Heading 1]
+  other_key:
+    if a or b and c: other_value
+    fallback_value
+""")
+
+    def test_8(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if a or (b and c): other_value
+    fallback_value
+""")
+
+    def test_9(self):
+        self.compare("""key: value
+[Heading 1]
+  other_key:
+    if not (a and b): other_value
+    fallback_value
+""")
+
+    def test_10(self):
+        self.compare("""key: value
+[Heading 1]
+  some_key: some_value
+
+[Heading 2]
+  other_key: other_value
+""")
+
+    def test_11(self):
+        self.compare("""key:
+  if not a and b and c and d: true
+""")
+
+    def test_12(self):
+        self.compare("""[Heading 1]
+  key: [a:1, b:2]
+""")
+
+    def test_13(self):
+        self.compare("""key: [a:1, "b:#"]
+""")
+
+    def test_14(self):
+        self.compare("""key: [","]
+""")
+
+    def test_15(self):
+        self.compare("""key: ,
+""")
+
+    def test_16(self):
+        self.compare("""key: ["]", b]
+""")
+
+    def test_17(self):
+        self.compare("""key: ]
+""")
+
+    def test_18(self):
+        self.compare("""key: \]
+        """, """key: ]
+""")
+
+    def test_escape_0(self):
+        self.compare(r"""k\t\:y: \a\b\f\n\r\t\v""",
+                     r"""k\t\:y: \x07\x08\x0c\n\r\t\x0b
+""")
+
+    def test_escape_1(self):
+        self.compare(r"""k\x00: \x12A\x45""",
+                     r"""k\x00: \x12AE
+""")
+
+    def test_escape_2(self):
+        self.compare(r"""k\u0045y: \u1234A\uABc6""",
+                     u"""kEy: \u1234A\uabc6
+""")
+
+    def test_escape_3(self):
+        self.compare(r"""k\u0045y: \u1234A\uABc6""",
+                     u"""kEy: \u1234A\uabc6
+""")
+
+    def test_escape_4(self):
+        self.compare(r"""key: '\u1234A\uABc6'""",
+                     u"""key: \u1234A\uabc6
+""")
+
+    def test_escape_5(self):
+        self.compare(r"""key: [\u1234A\uABc6]""",
+                     u"""key: [\u1234A\uabc6]
+""")
+
+    def test_escape_6(self):
+        self.compare(r"""key: [\u1234A\uABc6\,]""",
+                     u"""key: ["\u1234A\uabc6,"]
+""")
+
+    def test_escape_7(self):
+        self.compare(r"""key: [\,\]\#]""",
+                     r"""key: [",]#"]
+""")
+
+    def test_escape_8(self):
+        self.compare(r"""key: \#""",
+                     r"""key: "#"
+""")
+
+    @pytest.mark.xfail(sys.maxunicode == 0xFFFF, reason="narrow unicode")
+    def test_escape_9(self):
+        self.compare(r"""key: \U10FFFFabc""",
+                     u"""key: \U0010FFFFabc
+""")
+
+    def test_escape_10(self):
+        self.compare(r"""key: \u10FFab""",
+                     u"""key: \u10FFab
+""")
+
+    def test_escape_11(self):
+        self.compare(r"""key: \\ab
+""")
+
+    def test_atom_1(self):
+        self.compare(r"""key: @True
+""")
+
+    def test_atom_2(self):
+        self.compare(r"""key: @False
+""")
+
+    def test_atom_3(self):
+        self.compare(r"""key: @Reset
+""")
+
+    def test_atom_4(self):
+        self.compare(r"""key: [a, @Reset, b]
+""")
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py
new file mode 100644
index 0000000..ed28578
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py
@@ -0,0 +1,98 @@
+import unittest
+
+from cStringIO import StringIO
+
+from ..backends import static
+
+# There aren't many tests here because it turns out to be way more convenient to
+# use test_serializer for the majority of cases
+
+
+class TestStatic(unittest.TestCase):
+    def compile(self, input_text, input_data):
+        return static.compile(input_text, input_data)
+
+    def test_get_0(self):
+        data = """
+key: value
+
+[Heading 1]
+  other_key:
+    if a == 1: value_1
+    if a == 2: value_2
+    value_3
+"""
+
+        manifest = self.compile(data, {"a": 2})
+
+        self.assertEquals(manifest.get("key"), "value")
+        children = list(item for item in manifest.iterchildren())
+        self.assertEquals(len(children), 1)
+        section = children[0]
+        self.assertEquals(section.name, "Heading 1")
+
+        self.assertEquals(section.get("other_key"), "value_2")
+        self.assertEquals(section.get("key"), "value")
+
+    def test_get_1(self):
+        data = """
+key: value
+
+[Heading 1]
+  other_key:
+    if a == 1: value_1
+    if a == 2: value_2
+    value_3
+"""
+        manifest = self.compile(data, {"a": 3})
+
+        children = list(item for item in manifest.iterchildren())
+        section = children[0]
+        self.assertEquals(section.get("other_key"), "value_3")
+
+    def test_get_3(self):
+        data = """key:
+  if a == "1": value_1
+  if a[0] == "ab"[0]: value_2
+"""
+        manifest = self.compile(data, {"a": "1"})
+        self.assertEquals(manifest.get("key"), "value_1")
+
+        manifest = self.compile(data, {"a": "ac"})
+        self.assertEquals(manifest.get("key"), "value_2")
+
+    def test_get_4(self):
+        data = """key:
+  if not a: value_1
+  value_2
+"""
+        manifest = self.compile(data, {"a": True})
+        self.assertEquals(manifest.get("key"), "value_2")
+
+        manifest = self.compile(data, {"a": False})
+        self.assertEquals(manifest.get("key"), "value_1")
+
+    def test_api(self):
+        data = """key:
+  if a == 1.5: value_1
+  value_2
+key_1: other_value
+"""
+        manifest = self.compile(data, {"a": 1.5})
+
+        self.assertFalse(manifest.is_empty)
+        self.assertEquals(manifest.root, manifest)
+        self.assertTrue(manifest.has_key("key_1"))
+        self.assertFalse(manifest.has_key("key_2"))
+
+        self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
+        self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))
+
+    def test_is_empty_1(self):
+        data = """
+[Section]
+  [Subsection]
+"""
+        manifest = self.compile(data, {})
+
+        self.assertTrue(manifest.is_empty)
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_tokenizer.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_tokenizer.py
new file mode 100644
index 0000000..88176c5
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_tokenizer.py
@@ -0,0 +1,357 @@
+import sys
+import os
+import unittest
+
+sys.path.insert(0, os.path.abspath(".."))
+from cStringIO import StringIO
+
+from .. import parser
+from ..parser import token_types
+
+
+class TokenizerTest(unittest.TestCase):
+    def setUp(self):
+        self.tokenizer = parser.Tokenizer()
+
+    def tokenize(self, input_str):
+        rv = []
+        for item in self.tokenizer.tokenize(StringIO(input_str)):
+            rv.append(item)
+            if item[0] == token_types.eof:
+                break
+        return rv
+
+    def compare(self, input_text, expected):
+        expected = expected + [(token_types.eof, None)]
+        actual = self.tokenize(input_text)
+        self.assertEquals(actual, expected)
+
+    def test_heading_0(self):
+        self.compare("""[Heading text]""",
+                     [(token_types.paren, "["),
+                      (token_types.string, "Heading text"),
+                      (token_types.paren, "]")])
+
+    def test_heading_1(self):
+        self.compare("""[Heading [text\]]""",
+                     [(token_types.paren, "["),
+                      (token_types.string, "Heading [text]"),
+                      (token_types.paren, "]")])
+
+    def test_heading_2(self):
+        self.compare("""[Heading #text]""",
+                     [(token_types.paren, "["),
+                      (token_types.string, "Heading #text"),
+                      (token_types.paren, "]")])
+
+    def test_heading_3(self):
+        self.compare("""[Heading [\]text]""",
+                     [(token_types.paren, "["),
+                      (token_types.string, "Heading []text"),
+                      (token_types.paren, "]")])
+
+    def test_heading_4(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("[Heading")
+
+    def test_heading_5(self):
+        self.compare("""[Heading [\]text] #comment""",
+                     [(token_types.paren, "["),
+                      (token_types.string, "Heading []text"),
+                      (token_types.paren, "]")])
+
+    def test_heading_6(self):
+        self.compare(r"""[Heading \ttext]""",
+                     [(token_types.paren, "["),
+                      (token_types.string, "Heading \ttext"),
+                      (token_types.paren, "]")])
+
+    def test_key_0(self):
+        self.compare("""key:value""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, "value")])
+
+    def test_key_1(self):
+        self.compare("""key  :  value""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, "value")])
+
+    def test_key_2(self):
+        self.compare("""key  :  val ue""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, "val ue")])
+
+    def test_key_3(self):
+        self.compare("""key: value#comment""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, "value")])
+
+    def test_key_4(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("""ke y: value""")
+
+    def test_key_5(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("""key""")
+
+    def test_key_6(self):
+        self.compare("""key: "value\"""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, "value")])
+
+    def test_key_7(self):
+        self.compare("""key: 'value'""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, "value")])
+
+    def test_key_8(self):
+        self.compare("""key: "#value\"""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, "#value")])
+
+    def test_key_9(self):
+        self.compare("""key: '#value\'""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, "#value")])
+
+    def test_key_10(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("""key: "value""")
+
+    def test_key_11(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("""key: 'value""")
+
+    def test_key_12(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("""key: 'value""")
+
+    def test_key_13(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("""key: 'value' abc""")
+
+    def test_key_14(self):
+        self.compare(r"""key: \\nb""",
+                     [(token_types.string, "key"),
+                      (token_types.separator, ":"),
+                      (token_types.string, r"\nb")])
+
+    def test_list_0(self):
+        self.compare(
+"""
+key: []""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.list_start, "["),
+             (token_types.list_end, "]")])
+
+    def test_list_1(self):
+        self.compare(
+"""
+key: [a, "b"]""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.list_start, "["),
+             (token_types.string, "a"),
+             (token_types.string, "b"),
+             (token_types.list_end, "]")])
+
+    def test_list_2(self):
+        self.compare(
+"""
+key: [a,
+      b]""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.list_start, "["),
+             (token_types.string, "a"),
+             (token_types.string, "b"),
+             (token_types.list_end, "]")])
+
+    def test_list_3(self):
+        self.compare(
+"""
+key: [a, #b]
+      c]""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.list_start, "["),
+             (token_types.string, "a"),
+             (token_types.string, "c"),
+             (token_types.list_end, "]")])
+
+    def test_list_4(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("""key: [a #b]
+            c]""")
+
+    def test_list_5(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize("""key: [a \\
+            c]""")
+
+    def test_list_6(self):
+        self.compare(
+"""key: [a , b]""",
+        [(token_types.string, "key"),
+         (token_types.separator, ":"),
+         (token_types.list_start, "["),
+         (token_types.string, "a"),
+         (token_types.string, "b"),
+         (token_types.list_end, "]")])
+
+    def test_expr_0(self):
+        self.compare(
+"""
+key:
+  if cond == 1: value""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.group_start, None),
+             (token_types.ident, "if"),
+             (token_types.ident, "cond"),
+             (token_types.ident, "=="),
+             (token_types.number, "1"),
+             (token_types.separator, ":"),
+             (token_types.string, "value")])
+
+    def test_expr_1(self):
+        self.compare(
+"""
+key:
+  if cond == 1: value1
+  value2""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.group_start, None),
+             (token_types.ident, "if"),
+             (token_types.ident, "cond"),
+             (token_types.ident, "=="),
+             (token_types.number, "1"),
+             (token_types.separator, ":"),
+             (token_types.string, "value1"),
+             (token_types.string, "value2")])
+
+    def test_expr_2(self):
+        self.compare(
+"""
+key:
+  if cond=="1": value""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.group_start, None),
+             (token_types.ident, "if"),
+             (token_types.ident, "cond"),
+             (token_types.ident, "=="),
+             (token_types.string, "1"),
+             (token_types.separator, ":"),
+             (token_types.string, "value")])
+
+    def test_expr_3(self):
+        self.compare(
+"""
+key:
+  if cond==1.1: value""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.group_start, None),
+             (token_types.ident, "if"),
+             (token_types.ident, "cond"),
+             (token_types.ident, "=="),
+             (token_types.number, "1.1"),
+             (token_types.separator, ":"),
+             (token_types.string, "value")])
+
+    def test_expr_4(self):
+        self.compare(
+            """
+key:
+  if cond==1.1 and cond2 == "a": value""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.group_start, None),
+             (token_types.ident, "if"),
+             (token_types.ident, "cond"),
+             (token_types.ident, "=="),
+             (token_types.number, "1.1"),
+             (token_types.ident, "and"),
+             (token_types.ident, "cond2"),
+             (token_types.ident, "=="),
+             (token_types.string, "a"),
+             (token_types.separator, ":"),
+             (token_types.string, "value")])
+
+    def test_expr_5(self):
+        self.compare(
+"""
+key:
+  if (cond==1.1 ): value""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.group_start, None),
+             (token_types.ident, "if"),
+             (token_types.paren, "("),
+             (token_types.ident, "cond"),
+             (token_types.ident, "=="),
+             (token_types.number, "1.1"),
+             (token_types.paren, ")"),
+             (token_types.separator, ":"),
+             (token_types.string, "value")])
+
+    def test_expr_6(self):
+        self.compare(
+"""
+key:
+  if "\\ttest": value""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.group_start, None),
+             (token_types.ident, "if"),
+             (token_types.string, "\ttest"),
+             (token_types.separator, ":"),
+             (token_types.string, "value")])
+
+    def test_expr_7(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize(
+"""
+key:
+  if 1A: value""")
+
+    def test_expr_8(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize(
+"""
+key:
+  if 1a: value""")
+
+    def test_expr_9(self):
+        with self.assertRaises(parser.ParseError):
+            self.tokenize(
+"""
+key:
+  if 1.1.1: value""")
+
+    def test_expr_10(self):
+        self.compare(
+"""
+key:
+  if 1.: value""",
+            [(token_types.string, "key"),
+             (token_types.separator, ":"),
+             (token_types.group_start, None),
+             (token_types.ident, "if"),
+             (token_types.number, "1."),
+             (token_types.separator, ":"),
+             (token_types.string, "value")])
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptrunner.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptrunner.py
new file mode 100644
index 0000000..c8e3e54
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wptrunner.py
@@ -0,0 +1,284 @@
+from __future__ import unicode_literals
+
+import json
+import os
+import sys
+
+import environment as env
+import products
+import testloader
+import wptcommandline
+import wptlogging
+import wpttest
+from testrunner import ManagerGroup
+from browsers.base import NullBrowser
+
+here = os.path.split(__file__)[0]
+
+logger = None
+
+"""Runner for web-platform-tests
+
+The runner has several design goals:
+
+* Tests should run with no modification from upstream.
+
+* Tests should be regarded as "untrusted" so that errors, timeouts and even
+  crashes in the tests can be handled without failing the entire test run.
+
+* For performance, tests can be run in multiple browsers in parallel.
+
+The upstream repository has the facility for creating a test manifest in JSON
+format. This manifest is used directly to determine which tests exist. Local
+metadata files are used to store the expected test results.
+"""
+
+def setup_logging(*args, **kwargs):
+    global logger
+    logger = wptlogging.setup(*args, **kwargs)
+
+def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, **kwargs):
+    if run_info_extras is None:
+        run_info_extras = {}
+
+    run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug,
+                                    extras=run_info_extras)
+
+    test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"]).load()
+
+    manifest_filters = []
+    meta_filters = []
+
+    if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"]:
+        manifest_filters.append(testloader.TestFilter(include=kwargs["include"],
+                                                      exclude=kwargs["exclude"],
+                                                      manifest_path=kwargs["include_manifest"],
+                                                      test_manifests=test_manifests))
+    if kwargs["tags"]:
+        meta_filters.append(testloader.TagFilter(tags=kwargs["tags"]))
+
+    test_loader = testloader.TestLoader(test_manifests,
+                                        kwargs["test_types"],
+                                        run_info,
+                                        manifest_filters=manifest_filters,
+                                        meta_filters=meta_filters,
+                                        chunk_type=kwargs["chunk_type"],
+                                        total_chunks=kwargs["total_chunks"],
+                                        chunk_number=kwargs["this_chunk"],
+                                        include_https=ssl_env.ssl_enabled)
+    return run_info, test_loader
+
+def list_test_groups(test_paths, product, **kwargs):
+    env.do_delayed_imports(logger, test_paths)
+
+    ssl_env = env.ssl_env(logger, **kwargs)
+
+    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
+
+    run_info, test_loader = get_loader(test_paths, product, ssl_env,
+                                       run_info_extras=run_info_extras, **kwargs)
+
+    for item in sorted(test_loader.groups(kwargs["test_types"])):
+        print item
+
+
+def list_disabled(test_paths, product, **kwargs):
+    env.do_delayed_imports(logger, test_paths)
+
+    rv = []
+
+    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
+
+    ssl_env = env.ssl_env(logger, **kwargs)
+
+    run_info, test_loader = get_loader(test_paths, product, ssl_env,
+                                       run_info_extras=run_info_extras, **kwargs)
+
+    for test_type, tests in test_loader.disabled_tests.iteritems():
+        for test in tests:
+            rv.append({"test": test.id, "reason": test.disabled()})
+    print json.dumps(rv, indent=2)
+
+
+def list_tests(test_paths, product, **kwargs):
+    env.do_delayed_imports(logger, test_paths)
+
+    rv = []
+
+    ssl_env = env.ssl_env(logger, **kwargs)
+
+    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
+
+    run_info, test_loader = get_loader(test_paths, product, ssl_env,
+                                       run_info_extras=run_info_extras, **kwargs)
+
+    for test in test_loader.test_ids:
+        print test
+
+
+def get_pause_after_test(test_loader, **kwargs):
+    total_tests = sum(len(item) for item in test_loader.tests.itervalues())
+    if kwargs["pause_after_test"] is None:
+        if kwargs["repeat_until_unexpected"]:
+            return False
+        if kwargs["repeat"] == 1 and total_tests == 1:
+            return True
+        return False
+    return kwargs["pause_after_test"]
+
+
+def run_tests(config, test_paths, product, **kwargs):
+    with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
+        env.do_delayed_imports(logger, test_paths)
+
+        (check_args,
+         target_browser_cls, get_browser_kwargs,
+         executor_classes, get_executor_kwargs,
+         env_options, get_env_extras, run_info_extras) = products.load_product(config, product)
+
+        ssl_env = env.ssl_env(logger, **kwargs)
+        env_extras = get_env_extras(**kwargs)
+
+        check_args(**kwargs)
+
+        if "test_loader" in kwargs:
+            run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None,
+                                            extras=run_info_extras(**kwargs))
+            test_loader = kwargs["test_loader"]
+        else:
+            run_info, test_loader = get_loader(test_paths,
+                                               product,
+                                               ssl_env,
+                                               run_info_extras=run_info_extras(**kwargs),
+                                               **kwargs)
+
+        test_source_kwargs = {"processes": kwargs["processes"]}
+        if kwargs["run_by_dir"] is False:
+            test_source_cls = testloader.SingleTestSource
+        else:
+            # A value of None indicates infinite depth
+            test_source_cls = testloader.PathGroupedSource
+            test_source_kwargs["depth"] = kwargs["run_by_dir"]
+
+        logger.info("Using %i client processes" % kwargs["processes"])
+
+        unexpected_total = 0
+
+        kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
+
+        with env.TestEnvironment(test_paths,
+                                 ssl_env,
+                                 kwargs["pause_after_test"],
+                                 kwargs["debug_info"],
+                                 env_options,
+                                 env_extras) as test_environment:
+            try:
+                test_environment.ensure_started()
+            except env.TestEnvironmentError as e:
+                logger.critical("Error starting test environment: %s" % e.message)
+                raise
+
+            repeat = kwargs["repeat"]
+            repeat_count = 0
+            repeat_until_unexpected = kwargs["repeat_until_unexpected"]
+
+            while repeat_count < repeat or repeat_until_unexpected:
+                repeat_count += 1
+                if repeat_until_unexpected:
+                    logger.info("Repetition %i" % (repeat_count))
+                elif repeat > 1:
+                    logger.info("Repetition %i / %i" % (repeat_count, repeat))
+
+                unexpected_count = 0
+                logger.suite_start(test_loader.test_ids, run_info)
+                for test_type in kwargs["test_types"]:
+                    logger.info("Running %s tests" % test_type)
+
+                    # WebDriver tests may create and destroy multiple browser
+                    # processes as part of their expected behavior. These
+                    # processes are managed by a WebDriver server binary. This
+                    # obviates the need for wptrunner to provide a browser, so
+                    # the NullBrowser is used in place of the "target" browser
+                    if test_type == "wdspec":
+                        browser_cls = NullBrowser
+                    else:
+                        browser_cls = target_browser_cls
+
+                    browser_kwargs = get_browser_kwargs(test_type,
+                                                        run_info,
+                                                        ssl_env=ssl_env,
+                                                        **kwargs)
+
+                    executor_cls = executor_classes.get(test_type)
+                    executor_kwargs = get_executor_kwargs(test_type,
+                                                          test_environment.external_config,
+                                                          test_environment.cache_manager,
+                                                          run_info,
+                                                          **kwargs)
+
+                    if executor_cls is None:
+                        logger.error("Unsupported test type %s for product %s" %
+                                     (test_type, product))
+                        continue
+
+                    for test in test_loader.disabled_tests[test_type]:
+                        logger.test_start(test.id)
+                        logger.test_end(test.id, status="SKIP")
+
+                    with ManagerGroup("web-platform-tests",
+                                      kwargs["processes"],
+                                      test_source_cls,
+                                      test_source_kwargs,
+                                      browser_cls,
+                                      browser_kwargs,
+                                      executor_cls,
+                                      executor_kwargs,
+                                      kwargs["pause_after_test"],
+                                      kwargs["pause_on_unexpected"],
+                                      kwargs["restart_on_unexpected"],
+                                      kwargs["debug_info"]) as manager_group:
+                        try:
+                            manager_group.run(test_type, test_loader.tests)
+                        except KeyboardInterrupt:
+                            logger.critical("Main thread got signal")
+                            manager_group.stop()
+                            raise
+                    unexpected_count += manager_group.unexpected_count()
+
+                unexpected_total += unexpected_count
+                logger.info("Got %i unexpected results" % unexpected_count)
+                # Always close the suite so every suite_start is paired with
+                # a suite_end, even when stopping early.
+                logger.suite_end()
+                if repeat_until_unexpected and unexpected_total > 0:
+                    break
+
+    return unexpected_total == 0
+
+
+def start(**kwargs):
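+    """Dispatch between the test listing modes and a full test run, returning
+    a truthy value if the run produced unexpected results."""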
+    if kwargs["list_test_groups"]:
+        list_test_groups(**kwargs)
+    elif kwargs["list_disabled"]:
+        list_disabled(**kwargs)
+    elif kwargs["list_tests"]:
+        list_tests(**kwargs)
+    else:
+        return not run_tests(**kwargs)
+
+
+def main():
+    """Main entry point when calling from the command line"""
+    kwargs = wptcommandline.parse_args()
+
+    try:
+        if kwargs["prefs_root"] is None:
+            kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
+
+        setup_logging(kwargs, {"raw": sys.stdout})
+
+        return start(**kwargs)
+    except Exception:
+        if kwargs["pdb"]:
+            import pdb
+            import traceback
+            print traceback.format_exc()
+            pdb.post_mortem()
+        else:
+            raise
diff --git a/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wpttest.py b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wpttest.py
new file mode 100644
index 0000000..9c2604d
--- /dev/null
+++ b/src/third_party/web_platform_tests/tools/wptrunner/wptrunner/wpttest.py
@@ -0,0 +1,373 @@
+import os
+from collections import defaultdict
+
+from wptmanifest.parser import atoms
+
+atom_reset = atoms["Reset"]
+enabled_tests = set(["testharness", "reftest", "wdspec"])
+
+
+class Result(object):
+    def __init__(self, status, message, expected=None, extra=None):
+        if status not in self.statuses:
+            raise ValueError("Unrecognised status %s" % status)
+        self.status = status
+        self.message = message
+        self.expected = expected
+        self.extra = extra
+
+    def __repr__(self):
+        return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.status)
+
+
+class SubtestResult(object):
+    def __init__(self, name, status, message, stack=None, expected=None):
+        self.name = name
+        if status not in self.statuses:
+            raise ValueError("Unrecognised status %s" % status)
+        self.status = status
+        self.message = message
+        self.stack = stack
+        self.expected = expected
+
+    def __repr__(self):
+        return "<%s.%s %s %s>" % (self.__module__, self.__class__.__name__, self.name, self.status)
+
+
+class TestharnessResult(Result):
+    default_expected = "OK"
+    statuses = set(["OK", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
+
+
+class TestharnessSubtestResult(SubtestResult):
+    default_expected = "PASS"
+    statuses = set(["PASS", "FAIL", "TIMEOUT", "NOTRUN"])
+
+
+class ReftestResult(Result):
+    default_expected = "PASS"
+    statuses = set(["PASS", "FAIL", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
+
+
+class WdspecResult(Result):
+    default_expected = "OK"
+    statuses = set(["OK", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
+
+
+class WdspecSubtestResult(SubtestResult):
+    default_expected = "PASS"
+    statuses = set(["PASS", "FAIL", "ERROR"])
+
+
+def get_run_info(metadata_root, product, **kwargs):
+    return RunInfo(metadata_root, product, **kwargs)
+
+
+class RunInfo(dict):
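+    """Dictionary of properties describing the current test run, combining
+    mozinfo platform data with the product, debug setting, and any
+    product-specific extras; used when evaluating conditional expectations."""
+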
+    def __init__(self, metadata_root, product, debug, extras=None):
+        import mozinfo
+
+        self._update_mozinfo(metadata_root)
+        self.update(mozinfo.info)
+        self["product"] = product
+        if debug is not None:
+            self["debug"] = debug
+        elif "debug" not in self:
+            # Default to release
+            self["debug"] = False
+        if product == "firefox" and "stylo" not in self:
+            self["stylo"] = False
+        if "STYLO_FORCE_ENABLED" in os.environ:
+            self["stylo"] = True
+        if extras is not None:
+            self.update(extras)
+
+    def _update_mozinfo(self, metadata_root):
+        """Add extra build information from a mozinfo.json file in a parent
+        directory"""
+        import mozinfo
+
+        path = metadata_root
+        dirs = set()
+        while path != os.path.expanduser("~"):
+            if path in dirs:
+                break
+            dirs.add(str(path))
+            path = os.path.split(path)[0]
+
+        mozinfo.find_and_update_from_json(*dirs)
+
+
+class Test(object):
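+    """Base class for a single web-platform-test, wrapping a manifest item
+    together with its expectation metadata."""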
+
+    result_cls = None
+    subtest_result_cls = None
+    test_type = None
+
+    default_timeout = 10  # seconds
+    long_timeout = 60  # seconds
+
+    def __init__(self, tests_root, url, inherit_metadata, test_metadata,
+                 timeout=None, path=None, protocol="http"):
+        self.tests_root = tests_root
+        self.url = url
+        self._inherit_metadata = inherit_metadata
+        self._test_metadata = test_metadata
+        self.timeout = timeout if timeout is not None else self.default_timeout
+        self.path = path
+        self.environment = {"protocol": protocol, "prefs": self.prefs}
+
+    def __eq__(self, other):
+        return self.id == other.id
+
+    def update_metadata(self, metadata=None):
+        if metadata is None:
+            metadata = {}
+        return metadata
+
+    @classmethod
+    def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
+        timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
+        protocol = "https" if hasattr(manifest_item, "https") and manifest_item.https else "http"
+        return cls(manifest_item.source_file.tests_root,
+                   manifest_item.url,
+                   inherit_metadata,
+                   test_metadata,
+                   timeout=timeout,
+                   path=manifest_item.source_file.path,
+                   protocol=protocol)
+
+    @property
+    def id(self):
+        return self.url
+
+    @property
+    def keys(self):
+        return tuple()
+
+    @property
+    def abs_path(self):
+        return os.path.join(self.tests_root, self.path)
+
+    def _get_metadata(self, subtest=None):
+        if self._test_metadata is not None and subtest is not None:
+            return self._test_metadata.get_subtest(subtest)
+        else:
+            return self._test_metadata
+
+    def itermeta(self, subtest=None):
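+        """Yield the applicable expectation metadata from least to most
+        specific: inherited (directory) metadata first, then the test's own
+        metadata, then any subtest metadata."""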
+        for metadata in self._inherit_metadata:
+            yield metadata
+
+        if self._test_metadata is not None:
+            yield self._get_metadata()
+            if subtest is not None:
+                subtest_meta = self._get_metadata(subtest)
+                if subtest_meta is not None:
+                    yield subtest_meta
+
+    def disabled(self, subtest=None):
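+        """Return the reason this test (or subtest) is disabled, or None if
+        it is enabled."""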
+        for meta in self.itermeta(subtest):
+            disabled = meta.disabled
+            if disabled is not None:
+                return disabled
+        return None
+
+    @property
+    def restart_after(self):
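+        """Whether the browser should be restarted after running this test."""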
+        for meta in self.itermeta(None):
+            restart_after = meta.restart_after
+            if restart_after is not None:
+                return True
+        return False
+
+    @property
+    def leaks(self):
+        for meta in self.itermeta(None):
+            leaks = meta.leaks
+            if leaks is not None:
+                return leaks
+        return False
+
+    @property
+    def tags(self):
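+        """Set of metadata tags for the test, plus a dir:<name> tag for its
+        top-level directory; a Reset atom discards tags gathered so far."""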
+        tags = set()
+        for meta in self.itermeta():
+            meta_tags = meta.tags
+            if atom_reset in meta_tags:
+                tags = meta_tags.copy()
+                tags.remove(atom_reset)
+            else:
+                tags |= meta_tags
+
+        tags.add("dir:%s" % self.id.lstrip("/").split("/")[0])
+
+        return tags
+
+    @property
+    def prefs(self):
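+        """Browser preferences accumulated from the metadata; a Reset atom
+        replaces everything collected so far."""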
+        prefs = {}
+        for meta in self.itermeta():
+            meta_prefs = meta.prefs
+            if atom_reset in meta_prefs:
+                prefs = meta_prefs.copy()
+                del prefs[atom_reset]
+            else:
+                prefs.update(meta_prefs)
+        return prefs
+
+    def expected(self, subtest=None):
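+        """Return the expected status from the metadata, falling back to the
+        result class default when none is recorded."""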
+        if subtest is None:
+            default = self.result_cls.default_expected
+        else:
+            default = self.subtest_result_cls.default_expected
+
+        metadata = self._get_metadata(subtest)
+        if metadata is None:
+            return default
+
+        try:
+            return metadata.get("expected")
+        except KeyError:
+            return default
+
+    def __repr__(self):
+        return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.id)
+
+
+class TestharnessTest(Test):
+    result_cls = TestharnessResult
+    subtest_result_cls = TestharnessSubtestResult
+    test_type = "testharness"
+
+    @property
+    def id(self):
+        return self.url
+
+
+class ManualTest(Test):
+    test_type = "manual"
+
+    @property
+    def id(self):
+        return self.url
+
+
+class ReftestTest(Test):
+    result_cls = ReftestResult
+    test_type = "reftest"
+
+    def __init__(self, tests_root, url, inherit_metadata, test_metadata, references,
+                 timeout=None, path=None, viewport_size=None, dpi=None, protocol="http"):
+        Test.__init__(self, tests_root, url, inherit_metadata, test_metadata, timeout,
+                      path, protocol)
+
+        for _, ref_type in references:
+            if ref_type not in ("==", "!="):
+                raise ValueError("Unrecognised reference type %s" % ref_type)
+
+        self.references = references
+        self.viewport_size = viewport_size
+        self.dpi = dpi
+
+    @classmethod
+    def from_manifest(cls,
+                      manifest_test,
+                      inherit_metadata,
+                      test_metadata,
+                      nodes=None,
+                      references_seen=None):
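+        """Construct a ReftestTest from a manifest entry, recursively
+        following its reference links and using references_seen to avoid
+        revisiting comparison cycles."""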
+
+        timeout = cls.long_timeout if manifest_test.timeout == "long" else cls.default_timeout
+
+        if nodes is None:
+            nodes = {}
+        if references_seen is None:
+            references_seen = set()
+
+        url = manifest_test.url
+
+        node = cls(manifest_test.source_file.tests_root,
+                   manifest_test.url,
+                   inherit_metadata,
+                   test_metadata,
+                   [],
+                   timeout=timeout,
+                   path=manifest_test.path,
+                   viewport_size=manifest_test.viewport_size,
+                   dpi=manifest_test.dpi,
+                   protocol="https" if hasattr(manifest_test, "https") and manifest_test.https else "http")
+
+        nodes[url] = node
+
+        for ref_url, ref_type in manifest_test.references:
+            comparison_key = (ref_type,) + tuple(sorted([url, ref_url]))
+            if ref_url in nodes:
+                if comparison_key in references_seen:
+                    # We have reached a cycle so stop here
+                    # Note that just seeing a node for the second time is not
+                    # enough to detect a cycle because
+                    # A != B != C != A must include C != A
+                    # but A == B == A should not include the redundant B == A.
+                    continue
+
+            references_seen.add(comparison_key)
+
+            manifest_node = manifest_test.manifest.get_reference(ref_url)
+            if manifest_node:
+                reference = ReftestTest.from_manifest(manifest_node,
+                                                      [],
+                                                      None,
+                                                      nodes,
+                                                      references_seen)
+            else:
+                reference = ReftestTest(manifest_test.source_file.tests_root,
+                                        ref_url,
+                                        [],
+                                        None,
+                                        [])
+
+            node.references.append((reference, ref_type))
+
+        return node
+
+    def update_metadata(self, metadata):
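+        """Count in metadata["url_count"], per (protocol, url), how many
+        screenshots each reference will require, recursing through the whole
+        reference chain."""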
+        if not "url_count" in metadata:
+            metadata["url_count"] = defaultdict(int)
+        for reference, _ in self.references:
+            # We assume a naive implementation in which a URL with multiple
+            # possible screenshots will need to take both the lhs and rhs
+            # screenshots for each possible match
+            metadata["url_count"][(self.environment["protocol"], reference.url)] += 1
+            reference.update_metadata(metadata)
+        return metadata
+
+    @property
+    def id(self):
+        return self.url
+
+    @property
+    def keys(self):
+        return ("reftype", "refurl")
+
+
+class WdspecTest(Test):
+
+    result_cls = WdspecResult
+    subtest_result_cls = WdspecSubtestResult
+    test_type = "wdspec"
+
+    default_timeout = 25
+    long_timeout = 120
+
+
+manifest_test_cls = {"reftest": ReftestTest,
+                     "testharness": TestharnessTest,
+                     "manual": ManualTest,
+                     "wdspec": WdspecTest}
+
+
+def from_manifest(manifest_test, inherit_metadata, test_metadata):
+    test_cls = manifest_test_cls[manifest_test.item_type]
+    return test_cls.from_manifest(manifest_test, inherit_metadata, test_metadata)